Commit 8e5f8e12 authored by Ben Huber

Merge branch 'v3' of hemioGit:xerus/xerus into v3

parents faa484ae aaa6102c
Pipeline #632 passed with stages in 8 minutes and 7 seconds
......@@ -3,10 +3,11 @@
Potentially breaking changes are marked with an exclamation point '!' at the beginning of their description.
* 2016-??-?? v3.0.0
* Python wrapper now stable.
* ! The REQUIRE macro now logs an error instead of a fatal error.
* ! All macros and preprocessor defines now use the XERUS_ prefix; the config.mk file changed accordingly (see the sketch after this list).
* ! TT::find_largest_entry and TT::dyadic_product were moved out of the TT scope.
* Unit tests now use randomly seeded random engines.
* ! Tensor::modify_diag_elements renamed to Tensor::modify_diagonal_entries for naming consistency.
* Some minor bugfixes.
* 2016-06-23 v2.4.0
......
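A minimal sketch of the v3.0.0 macro rename noted above, assuming the old names map one-to-one onto their prefixed versions:

    REQUIRE(n > 0, "n must be positive");       // pre-3.0.0
    XERUS_REQUIRE(n > 0, "n must be positive"); // 3.0.0 and later
    LOG(info, "starting sweep");                // pre-3.0.0
    XERUS_LOG(info, "starting sweep");          // 3.0.0 and later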
......@@ -123,6 +123,8 @@ namespace xerus {
void sort(const bool _positionsOnly);
void normalize();
void measure(const Tensor& _solution);
......
......@@ -40,8 +40,9 @@ namespace xerus {
//Forward declarations
class TensorNetwork;
// TODO ugly, move it back to the end of the file.
// NOTE these two functions are used in the template functions of Tensor, so they have to be declared before the class.
class Tensor;
/**
* @brief Low-level contraction between Tensors.
* @param _result Output for the result of the contraction.
......@@ -716,11 +717,6 @@ namespace xerus {
*/
void resize_mode(const size_t _mode, const size_t _newDim, size_t _cutPos=~0ul);
__attribute__((deprecated("function has been renamed. please use 'resize_mode'")))
void resize_dimension(const size_t _dimPos, const size_t _newDim, size_t _cutPos=~0ul) {
resize_mode(_dimPos, _newDim, _cutPos);
}
/**
* @brief Fixes a specific mode to a specific value, effectively reducing the order by one.
......@@ -729,11 +725,6 @@ namespace xerus {
*/
void fix_mode(const size_t _mode, const size_t _slatePosition);
__attribute__((deprecated("function has been renamed. please use 'fix_mode'")))
void fix_slate(const size_t _dimPos, const size_t _slatePosition) {
fix_mode(_dimPos, _slatePosition);
}
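// For illustration, a usage sketch of the renamed functions above; the tensor and its
// dimensions are hypothetical, only resize_mode/fix_mode are taken from this header:
//   xerus::Tensor T = xerus::Tensor::random({4, 5, 6});
//   T.resize_mode(1, 7);  // mode 1 grows from dimension 5 to 7
//   T.fix_mode(2, 0);     // fixes mode 2 to slate 0, reducing the order from 3 to 2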
/**
* @brief Removes a single slate from the Tensor, reducing dimension[_mode] by one.
......@@ -756,7 +747,7 @@ namespace xerus {
* @details In this overload only the current diagonal entries are passed to @a _f, one at a time. At the moment this is only defined for matrices.
* @param _f the function to call to modify each entry.
*/
void modify_diag_elements(const std::function<void(value_t&)>& _f);
void modify_diagonal_entries(const std::function<void(value_t&)>& _f);
/**
......@@ -764,7 +755,7 @@ namespace xerus {
* @details In this overload the current diagonal entries are passed to @a _f, one at a time, together with their position on the diagonal. At the moment this is only defined for matrices.
* @param _f the function to call to modify each entry.
*/
void modify_diag_elements(const std::function<void(value_t&, const size_t)>& _f);
void modify_diagonal_entries(const std::function<void(value_t&, const size_t)>& _f);
/**
......@@ -772,7 +763,7 @@ namespace xerus {
* @details In this overload only the current entry is passed to @a _f.
* @param _f the function to call to modify each entry.
*/
void modify_elements(const std::function<void(value_t&)>& _f);
void modify_entries(const std::function<void(value_t&)>& _f);
/**
......@@ -780,7 +771,7 @@ namespace xerus {
* @details In this overload the current entry, together with its position assuming row-major ordering, is passed to @a _f.
* @param _f the function to call to modify each entry.
*/
void modify_elements(const std::function<void(value_t&, const size_t)>& _f);
void modify_entries(const std::function<void(value_t&, const size_t)>& _f);
/**
......@@ -788,7 +779,7 @@ namespace xerus {
* @details In this overload the current entry together with its complete position is passed to @a _f.
* @param _f the function to call to modify each entry.
*/
void modify_elements(const std::function<void(value_t&, const MultiIndex&)>& _f);
void modify_entries(const std::function<void(value_t&, const MultiIndex&)>& _f);
/**
* @brief Adds the given Tensor with the given offsets to this one.
......@@ -1021,6 +1012,13 @@ namespace xerus {
*/
bool approx_entrywise_equal(const xerus::Tensor& _tensor, const std::vector<value_t>& _values, const xerus::value_t _eps = EPSILON);
/**
* @brief Prints the Tensor to the given output stream.
* @param _out the stream to print to.
* @param _tensor the tensor.
* @return @a _out.
*/
std::ostream& operator<<(std::ostream& _out, const Tensor& _tensor);
namespace misc {
/**
......
......@@ -230,7 +230,7 @@ namespace xerus {
* @param _nodeB The second node.
* @return Tuple containing the two positions in @a _nodeA and @a _nodeB.
*/
std::tuple<size_t, size_t> find_common_edge(const size_t _nodeA, const size_t _nodeB) const;
std::pair<size_t, size_t> find_common_edge(const size_t _nodeA, const size_t _nodeB) const;
......
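// Usage sketch for the pair-returning signature above; `network` is a hypothetical TensorNetwork:
//   size_t posA, posB;
//   std::tie(posA, posB) = network.find_common_edge(0, 1); // positions of the shared edge within nodes 0 and 1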
......@@ -182,7 +182,7 @@ static misc::UnitTest tensor_qr_rq_rnd6("Tensor", "QR_AND_RQ_Random_Order_Six",
MTEST(frob_norm(Q(i,j,k,l)*Q(i,j,k,m) - Tensor::identity({R.dimensions[0], R.dimensions[0]})(l, m)) < 1e-12, " Q not orthogonal");
res4(l,m) = Q(i,j,k,l) * Q(i,j,k,m);
res4.modify_diag_elements([](value_t &entry){entry -= 1;});
res4.modify_diagonal_entries([](value_t &entry){entry -= 1;});
TEST(frob_norm(res4) < 1e-12);
(Q(i,j,k,l), R(l,m,n,r)) = QR(A(i,n,k,m,j,r));
......
......@@ -75,7 +75,7 @@ static misc::UnitTest tensor_solve_sparse("Tensor", "solve_sparse", [](){
std::mt19937_64 &rnd = xerus::misc::randomEngine;
std::normal_distribution<double> dist(0.0, 1.0);
const size_t N = 100;
std::uniform_int_distribution<size_t> eDist(1,N*N-1);
std::uniform_int_distribution<size_t> eDist(1, N*N-1);
Index i,j,k;
......@@ -100,17 +100,18 @@ static misc::UnitTest tensor_solve_sparse("Tensor", "solve_sparse", [](){
internal::CholmodSparse idt(id.get_unsanitized_sparse_data(), N, N, false);
Tensor id2({N,N});
id2.get_unsanitized_sparse_data() = idt.to_map();
MTEST(frob_norm(id-id2) < 1e-15, frob_norm(id-id2));
MTEST(frob_norm(id-id2) < 1e-12, frob_norm(id-id2));
Tensor fid(id);
fid.use_dense_representation();
Tensor fx;
// id.use_sparse_representation();
TEST(id.is_sparse());
fx(i) = r(j) / fid(j,i);
x(i) = r(j) / id(j,i);
MTEST(frob_norm(fx-x)/frob_norm(x)<1e-13, frob_norm(fx-x)/frob_norm(x));
MTEST(frob_norm(id(j,i)*x(i) - r(j))/frob_norm(x)<1e-12, frob_norm(id(j,i)*x(i) - r(j))/frob_norm(x));
MTEST(frob_norm(fid(j,i)*fx(i) - r(j))/frob_norm(x)<1e-12, frob_norm(fid(j,i)*fx(i) - r(j))/frob_norm(x));
MTEST(frob_norm(fx-x)/frob_norm(x)<1e-12, frob_norm(fx-x)/frob_norm(x));
});
static misc::UnitTest tensor_solve_trans("Tensor", "solve_transposed", [](){
......@@ -134,7 +135,7 @@ static misc::UnitTest tensor_solve_trans("Tensor", "solve_transposed", [](){
Tensor x1, x2;
x1(i) = r(j) / A(i,j);
x2(i) = r(j) / At(j,i);
MTEST(frob_norm(x1-x2) < 1e-14, "s " << frob_norm(x1-x2));
MTEST(frob_norm(x1-x2) < 1e-12, "s " << frob_norm(x1-x2));
A.use_dense_representation();
At.use_dense_representation();
......@@ -144,10 +145,10 @@ static misc::UnitTest tensor_solve_trans("Tensor", "solve_transposed", [](){
x4(i) = r(j) / At(j,i);
residual(i) = A(i,j) * (x3(j) - x4(j));
MTEST(frob_norm(x3-x4) < 1e-14, "d " << frob_norm(x3-x4) << " residual: " << frob_norm(residual));
MTEST(frob_norm(x3-x4) < 1e-12, "d " << frob_norm(x3-x4) << " residual: " << frob_norm(residual));
residual(i) = A(i,j) * (x1(j) - x3(j));
MTEST(frob_norm(x1-x3)/frob_norm(x1) < 5e-14, "sd " << frob_norm(x1-x3)/frob_norm(x1) << " residual: " << frob_norm(residual));
MTEST(frob_norm(x1-x3)/frob_norm(x1) < 1e-12, "sd " << frob_norm(x1-x3)/frob_norm(x1) << " residual: " << frob_norm(residual));
});
......@@ -170,7 +171,6 @@ static misc::UnitTest tensor_solve_matrix("Tensor", "solve_matrix", [](){
for(size_t xi = 0; xi < degN; ++xi) { nDims.push_back(n2Dist(rnd)); }
for(size_t xi = 0; xi < degP; ++xi) { pDims.push_back(n2Dist(rnd)); }
XERUS_LOG(bla, "Set " << mDims << "x" << nDims << " * " << pDims);
auto A = Tensor::random(mDims | nDims);
A *= realDist(rnd);
......@@ -190,7 +190,12 @@ static misc::UnitTest tensor_solve_matrix("Tensor", "solve_matrix", [](){
Tensor residual;
residual(i^degM, k^degP) = A(i^degM, j^degN)*X(j^degN, k^degP) - B(i^degM, k^degP);
MTEST(frob_norm(residual) < 1e-10, frob_norm(residual));
X(j^degN, k^degP) = B(i^degM, k^degP) / A(i^degM, j^degN); //solve_least_squares(X, A, B, degP);
residual(i^degM, k^degP) = A(i^degM, j^degN)*X(j^degN, k^degP) - B(i^degM, k^degP);
MTEST(frob_norm(residual) < 1e-10, frob_norm(residual));
}
});
......@@ -118,7 +118,7 @@ static misc::UnitTest tensor_dim_exp("Tensor", "dimension_expansion", [](){
TEST(C.size == 12);
});
static misc::UnitTest tensor_modify_elem("Tensor", "modify_elements", [](){
static misc::UnitTest tensor_modify_elem("Tensor", "modify_entries", [](){
Tensor A({4,4});
Tensor C({2,8});
......@@ -139,18 +139,18 @@ static misc::UnitTest tensor_modify_elem("Tensor", "modify_elements", [](){
A[{3,2}] = 15;
A[{3,3}] = 16;
A.modify_diag_elements([](value_t& _entry){});
A.modify_diagonal_entries([](value_t& _entry){});
TEST(approx_entrywise_equal(A, {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}));
A.modify_diag_elements([](value_t& _entry){_entry = 73.5*_entry;});
A.modify_diagonal_entries([](value_t& _entry){_entry = 73.5*_entry;});
TEST(approx_entrywise_equal(A, {73.5*1,2,3,4,5,73.5*6,7,8,9,10,73.5*11,12,13,14,15,73.5*16}));
A.modify_diag_elements([](value_t& _entry, const size_t _position){_entry = 73.5*_entry - static_cast<value_t>(_position);});
A.modify_diagonal_entries([](value_t& _entry, const size_t _position){_entry = 73.5*_entry - static_cast<value_t>(_position);});
TEST(approx_entrywise_equal(A, {73.5*73.5*1,2,3,4,5,73.5*73.5*6-1.0,7,8,9,10,73.5*73.5*11-2.0,12,13,14,15,73.5*73.5*16-3.0}));
A.reinterpret_dimensions({2,8});
A.modify_diag_elements([](value_t& _entry){_entry = 0;});
A.modify_diagonal_entries([](value_t& _entry){_entry = 0;});
TEST(approx_entrywise_equal(A, {0,2,3,4,5,73.5*73.5*6-1.0,7,8,9,0,73.5*73.5*11-2.0,12,13,14,15,73.5*73.5*16-3.0}));
});
......
......@@ -206,7 +206,7 @@ static misc::UnitTest tttv_creation("TTTangentVector", "creation", [](){
scalarProdInTangentSpace = tangentChange1.scalar_product(tangentChange2);
scalarProdInEmbeddingSpace = value_t(TTTensor(tangentChange1)(j&0) * TTTensor(tangentChange2)(j&0));
MTEST(misc::approx_equal(scalarProdInEmbeddingSpace, scalarProdInTangentSpace, 1e-14),
MTEST(misc::approx_equal(scalarProdInEmbeddingSpace, scalarProdInTangentSpace, 1e-10),
"scalarProd " << scalarProdInEmbeddingSpace << " " << scalarProdInTangentSpace << " diff " << (scalarProdInEmbeddingSpace-scalarProdInTangentSpace));
// test whether copy assignment, += and * work as intended
......
......@@ -121,7 +121,7 @@ static misc::UnitTest sparse_dim_exp("SparseTensor", "dimension_expansion", []()
TEST(C.size == 12);
});
static misc::UnitTest sparse_mod("SparseTensor", "modify_elements", [](){
static misc::UnitTest sparse_mod("SparseTensor", "modify_entries", [](){
Tensor A({4,4}, Tensor::Representation::Sparse);
Tensor B({4,4,7}, Tensor::Representation::Sparse);
Tensor C({2,8}, Tensor::Representation::Sparse);
......@@ -144,18 +144,18 @@ static misc::UnitTest sparse_mod("SparseTensor", "modify_elements", [](){
A[{3,3}] = 16;
A.use_sparse_representation();
A.modify_diag_elements([](value_t& _entry){});
A.modify_diagonal_entries([](value_t& _entry){});
TEST(approx_entrywise_equal(A, {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}));
A.modify_diag_elements([](value_t& _entry){_entry = 73.5*_entry;});
A.modify_diagonal_entries([](value_t& _entry){_entry = 73.5*_entry;});
TEST(approx_entrywise_equal(A, {73.5*1,2,3,4,5,73.5*6,7,8,9,10,73.5*11,12,13,14,15,73.5*16}));
A.modify_diag_elements([](value_t& _entry, const size_t _position){_entry = 73.5*_entry - static_cast<value_t>(_position);});
A.modify_diagonal_entries([](value_t& _entry, const size_t _position){_entry = 73.5*_entry - static_cast<value_t>(_position);});
TEST(approx_entrywise_equal(A, {73.5*73.5*1,2,3,4,5,73.5*73.5*6-1.0,7,8,9,10,73.5*73.5*11-2.0,12,13,14,15,73.5*73.5*16-3.0}));
A.reinterpret_dimensions({2,8});
A.modify_diag_elements([](value_t& _entry){_entry = 0;});
A.modify_diagonal_entries([](value_t& _entry){_entry = 0;});
TEST(approx_entrywise_equal(A, {0,2,3,4,5,73.5*73.5*6-1.0,7,8,9,0,73.5*73.5*11-2.0,12,13,14,15,73.5*73.5*16-3.0}));
});
......
......@@ -378,23 +378,23 @@ static misc::UnitTest tt_opt("TT", "operator_times_tensor", [](){
Index i,j,k,l,m;
ttD(i^2) = ttA(i^2,j^2) * ttB(j^2,k^2) * ttC(k^2);
D(i^2) = A(i^2,j^2) * B(j^2,k^2) * C(k^2);
MTEST(approx_equal(D, Tensor(ttD), 3e-15), "1 " << frob_norm(D-Tensor(ttD)) << " / " << frob_norm(D));
MTEST(approx_equal(D, Tensor(ttD), 3e-13), "1 " << frob_norm(D-Tensor(ttD)) << " / " << frob_norm(D));
ttD(i^2) = ttA(i^2,j^2) * ttB(k^2,j^2) * ttC(k^2);
D(i^2) = A(i^2,j^2) * B(k^2,j^2) * C(k^2);
MTEST(approx_equal(D, Tensor(ttD), 2e-15), "2 " << frob_norm(D-Tensor(ttD)) << " / " << frob_norm(D));
MTEST(approx_equal(D, Tensor(ttD), 2e-13), "2 " << frob_norm(D-Tensor(ttD)) << " / " << frob_norm(D));
ttDo(i^2,k^2) = ttA(i^2,j^2) * ttB(j^2,k^2);
Do(i^2,k^2) = A(i^2,j^2) * B(j^2,k^2);
MTEST(approx_equal(Do, Tensor(ttDo), 2e-15), "3 " << frob_norm(Do-Tensor(ttDo)) << " / " << frob_norm(Do));
MTEST(approx_equal(Do, Tensor(ttDo), 2e-13), "3 " << frob_norm(Do-Tensor(ttDo)) << " / " << frob_norm(Do));
ttDo(i^2,k^2) = ttA(i^2,j^2) * ttA(j^2,k^2);
Do(i^2,k^2) = A(i^2,j^2) * A(j^2,k^2);
MTEST(approx_equal(Do, Tensor(ttDo), 2e-15), "4 " << frob_norm(Do-Tensor(ttDo)) << " / " << frob_norm(Do));
MTEST(approx_equal(Do, Tensor(ttDo), 2e-13), "4 " << frob_norm(Do-Tensor(ttDo)) << " / " << frob_norm(Do));
ttDo(i^2,l^2) = ttA(i^2,j^2) * ttB(j^2,k^2) * ttA(l^2,k^2);
Do(i^2,l^2) = A(i^2,j^2) * B(j^2,k^2) * A(l^2,k^2);
MTEST(approx_equal(Do, Tensor(ttDo), 2e-15), "5 " << frob_norm(Do-Tensor(ttDo)) << " / " << frob_norm(Do));
MTEST(approx_equal(Do, Tensor(ttDo), 2e-13), "5 " << frob_norm(Do-Tensor(ttDo)) << " / " << frob_norm(Do));
ttDo(i^2,l^2) = ttA(i^2,j^2) * ttB(j^2,k^2) * ttB(l^2,k^2);
Do(i^2,l^2) = A(i^2,j^2) * B(j^2,k^2) * B(l^2,k^2);
......@@ -402,7 +402,7 @@ static misc::UnitTest tt_opt("TT", "operator_times_tensor", [](){
ttDo(i^2,m^2) = ttA(i^2,j^2) * ttB(j^2,k^2) * ttB(l^2,k^2) * ttA(l^2,m^2);
Do(i^2,m^2) = A(i^2,j^2) * B(j^2,k^2) * B(l^2,k^2) * A(l^2,m^2);
MTEST(approx_equal(Do, Tensor(ttDo), 2e-15), "7 " << frob_norm(Do-Tensor(ttDo)) << " / " << frob_norm(Do));
MTEST(approx_equal(Do, Tensor(ttDo), 2e-13), "7 " << frob_norm(Do-Tensor(ttDo)) << " / " << frob_norm(Do));
});
static misc::UnitTest tt_fullcontr("TT", "full_contraction", [](){
......
......@@ -121,7 +121,7 @@ static misc::UnitTest alg_adf_rnd("Algorithm", "adf_random_low_rank", [](){
}
TEST(test);
ADFVariant ourADF(500, 1e-6, 1e-6);
ADFVariant ourADF(500, 1e-6, 0.999);
TTTensor X = TTTensor::ones(std::vector<size_t>(D, N));
PerformanceData perfData([&](const TTTensor& _x) {return frob_norm(_x - trueSolution)/frob_norm(trueSolution);}, true, false);
......
......@@ -4,8 +4,8 @@
using namespace xerus;
static misc::UnitTest tt_entryprod("TT", "entrywise_product", [](){
Index i,j,k;
Index i,j,k;
TTTensor A = TTTensor::random(std::vector<size_t>(10,2), std::vector<size_t>(9,2));
TTTensor B = TTTensor::random(std::vector<size_t>(10,2), std::vector<size_t>(9,2));
......@@ -34,8 +34,8 @@ static misc::UnitTest tt_entryprod("TT", "entrywise_product", [](){
});
static misc::UnitTest tt_soft("TT", "soft_thresholding", [](){
Index i,j,k;
Index i,j,k;
TTTensor A = TTTensor::random(std::vector<size_t>(10,2), std::vector<size_t>(9,2));
TTTensor B = TTTensor::random(std::vector<size_t>(10,2), std::vector<size_t>(9,2));
......@@ -53,3 +53,30 @@ static misc::UnitTest tt_soft("TT", "soft_thresholding", [](){
TEST(frob_norm(Cf - Tensor(Co))/frob_norm(Cf) < 1e-14);
TEST(frob_norm(Cf - Tensor(C))/frob_norm(Cf) < 1e-14);
});
static misc::UnitTest tt_pseudo_inv("TT", "Non-operator Pseudo Inverse", [](){
Index i,j,k,r1,r2,r3,r4;
const size_t d = 2;
const TTTensor op = TTTensor::random(std::vector<size_t>(2*d, 10), std::vector<size_t>(2*d-1, 4));
TTTensor tmp = op;
tmp.move_core(d);
auto parts = tmp.chop(d);
Tensor U,S,V;
(U(i,r1), S(r1,r2), V(r2,j^2)) = SVD(tmp.get_component(d)(i,j^2));
S.modify_diagonal_entries([](double &_v){
if (_v>1e-10) {
_v = 1/_v;
}
});
TensorNetwork pInv;
pInv(j, i^(d-1), k^d) = parts.first(k^d,r1) * U(r1,r2) * S(r2,r3) * V(r3,j,r4) * parts.second(r4,i^(d-1));
double error = frob_norm(pInv(i^d,r1^d)*op(r1^d,r2^d)*pInv(r2^d,j^d) - pInv(i^d,j^d));
MTEST(error < 1e-10, "A^+ A A^+ != A^+ ? " << error);
error = frob_norm(op(i^d,r1^d)*pInv(r1^d,r2^d)*op(r2^d,j^d) - op(i^d,j^d));
MTEST(error < 1e-10, "A A^+ A != A ? " << error);
});
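// For reference, the two Moore-Penrose conditions verified by this test are
//   A^+ A A^+ = A^+   and   A A^+ A = A;
// the remaining two conditions (symmetry of A A^+ and of A^+ A) are not checked here.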
......@@ -488,7 +488,7 @@ namespace xerus {
template<class MeasurmentSet>
void ADFVariant::InternalSolver<MeasurmentSet>::solve_with_current_ranks() {
double resDec1 = 0.0, resDec2 = 0.0;
double resDec1 = 0.0, resDec2 = 0.0, resDec3 = 0.0;
for(; maxIterations == 0 || iteration < maxIterations; ++iteration) {
......@@ -514,10 +514,10 @@ namespace xerus {
perfData.add(iteration, residualNorm, x, 0);
// Check for termination criteria
double resDec3 = resDec2; resDec2 = resDec1;
resDec1 = residualNorm/lastResidualNorm;
LOG(wup, resDec1*resDec2*resDec3);
if(residualNorm < targetResidualNorm || resDec1*resDec2*resDec3 > misc::pow(minimalResidualNormDecrease, 3)) { break; }
double resDec4 = resDec3; resDec3 = resDec2; resDec2 = resDec1;
resDec1 = residualNorm/lastResidualNorm;
// LOG(wup, resDec1*resDec2*resDec3*resDec4);
if(residualNorm < targetResidualNorm || resDec1*resDec2*resDec3*resDec4 > misc::pow(minimalResidualNormDecrease, 4)) { break; }
// Sweep from the first to the last component
......@@ -541,6 +541,8 @@ namespace xerus {
}
}
template<class MeasurmentSet>
double ADFVariant::InternalSolver<MeasurmentSet>::solve() {
perfData.start();
......@@ -563,10 +565,24 @@ namespace xerus {
// If we follow a rank increasing strategy, increase the ranks until we reach the targetResidualNorm, the maxRanks or the maxIterations.
while(residualNorm > targetResidualNorm && x.ranks() != maxRanks && (maxIterations == 0 || iteration < maxIterations)) {
// LOG(xRanKResBefore, measurments.test(x));
// Increase the ranks
auto rndTensor = TTTensor::random(x.dimensions, std::vector<size_t>(x.degree()-1, 1));
x = x+(1e-10*frob_norm(x)/frob_norm(rndTensor))*rndTensor;
x.move_core(0, true);
const auto rndTensor = TTTensor::random(x.dimensions, std::vector<size_t>(x.degree()-1, 1));
const auto oldX = x;
auto diff = (1.0/frob_norm(rndTensor))*rndTensor;
// LOG(bla, frob_norm(diff) << " x " << frob_norm(x) << " b " << normMeasuredValues);
for(size_t i = 0; i < diff.degree(); ++i) {
diff.component(i).apply_factor();
}
// LOG(diff1, measurments.test(diff));
diff *= normMeasuredValues*1e-5;
// LOG(diff2, measurments.test(diff));
x = x+diff;
// LOG(realDifference, frob_norm(x -oldX) << " x " << frob_norm(x));
// LOG(xRanKResAfter, measurments.test(x));
x.round(maxRanks);
......
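// A condensed sketch of the rank-increase step above, using only calls visible in this
// hunk (TTTensor::random, frob_norm, round); `_scale` stands in for normMeasuredValues*1e-5:
//   TTTensor increase_ranks(TTTensor _x, const double _scale, const std::vector<size_t>& _maxRanks) {
//       auto diff = TTTensor::random(_x.dimensions, std::vector<size_t>(_x.degree()-1, 1));
//       _x = _x + (_scale/frob_norm(diff))*diff;
//       _x.round(_maxRanks);
//       return _x;
//   }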
......@@ -540,7 +540,7 @@ namespace xerus {
std::unique_ptr<int[]> pivot(new int[_n]);
misc::set_zero(pivot.get(), _n);
// std::unique_ptr<double[]> signulars(new double[std::min(_n, _m)]);
std::unique_ptr<double[]> signulars(new double[std::min(_n, _m)]);
int rank;
......@@ -553,20 +553,7 @@ namespace xerus {
misc::set_zero(bOrX+_m*_p, (_n-_m)*_p); // Lapacke is unhappy if the array contains NANs...
}
IF_CHECK( int lapackAnswer = ) LAPACKE_dgelsy(
LAPACK_ROW_MAJOR,
static_cast<int>(_m), // Left dimension of A
static_cast<int>(_n), // Right dimension of A
static_cast<int>(_p), // Number of rhss
_A, // Matrix A
static_cast<int>(_n), // LDA
bOrX, // On input b, on output x
static_cast<int>(_p), // LDB
pivot.get(), // Pivot, entries must be zero to allow pivoting
xerus::EPSILON, // Used to determine the accuracy of the Lapacke call. Basically all singular values smaller than RCOND*s[0] are ignored. (s[0] is the largest singular value)
&rank); // Outputs the rank of A
// IF_CHECK( int lapackAnswer = ) LAPACKE_dgelsd(
// IF_CHECK( int lapackAnswer = ) LAPACKE_dgelsy(
// LAPACK_ROW_MAJOR,
// static_cast<int>(_m), // Left dimension of A
// static_cast<int>(_n), // Right dimension of A
......@@ -574,11 +561,24 @@ namespace xerus {
// _A, // Matrix A
// static_cast<int>(_n), // LDA
// bOrX, // On input b, on output x
// static_cast<int>(_p), // LDB
// signulars.get(), // Pivot, entries must be zero to allow pivoting
// static_cast<int>(_p), // LDB
// pivot.get(), // Pivot, entries must be zero to allow pivoting
// xerus::EPSILON, // Used to determine the accuracy of the Lapacke call. Basically all singular values smaller than RCOND*s[0] are ignored. (s[0] is the largest singular value)
// &rank); // Outputs the rank of A
IF_CHECK( int lapackAnswer = ) LAPACKE_dgelsd(
LAPACK_ROW_MAJOR,
static_cast<int>(_m), // Left dimension of A
static_cast<int>(_n), // Right dimension of A
static_cast<int>(_p), // Number of rhss
_A, // Matrix A
static_cast<int>(_n), // LDA
bOrX, // On input b, on output x
static_cast<int>(_p), // LDB
signulars.get(), // Output: the singular values of A in decreasing order
xerus::EPSILON, // Used to determine the accuracy of the Lapacke call. Basically all singular values smaller than RCOND*s[0] are ignored. (s[0] is the largest singular value)
&rank); // Outputs the rank of A
CHECK(lapackAnswer == 0, error, "Unable to solve min ||Ax - b||_2 for x. Lapacke says: " << lapackAnswer << " sizes are " << _m << " x " << _n << " * " << _p);
if(_m >= _n) { // I.e. bOrX is _b
......
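// Context: LAPACKE_dgelsy solves min ||Ax - b||_2 via a column-pivoted QR factorization,
// whereas LAPACKE_dgelsd uses a divide-and-conquer SVD; the SVD route is generally the
// more robust choice for ill-conditioned or rank-deficient A, at somewhat higher cost.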
......@@ -251,7 +251,6 @@ namespace xerus {
_out.assign_indices();
// Assign the dimensions
_base.assign_index_dimensions();
_out.assign_index_dimensions();
......
......@@ -22,156 +22,59 @@
* @brief Implementation of the indexed tensor operator/ (the solve operator).
*/
#include <xerus/misc/internal.h>
#include <xerus/misc/containerSupport.h>
#include <xerus/indexedTensor_tensor_factorisations.h>
#include <xerus/index.h>
#include <xerus/indexedTensor.h>
#include <xerus/indexedTensorMoveable.h>
#include <xerus/tensor.h>
#include <xerus/blasLapackWrapper.h>
#include <xerus/cholmod_wrapper.h>
namespace xerus {
void solve(internal::IndexedTensorWritable<Tensor>&& _x, internal::IndexedTensorReadOnly<Tensor>&& _a, internal::IndexedTensorReadOnly<Tensor>&& _b) {
// x takes the dimensions of A -- also ensures that every index of x is contained in A
_x.tensorObject->reset(_a.get_evaluated_dimensions(_x.indices), Tensor::Initialisation::None);
_a.assign_indices();
internal::IndexedTensorMoveable<Tensor> operator/ (internal::IndexedTensorReadOnly<Tensor>&& _b, internal::IndexedTensorReadOnly<Tensor>&& _A) {
_A.assign_indices();
_b.assign_indices();
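// Note: this operator backs the solve syntax x(i) = b(j) / A(j,i); dense systems go
// through the Lapack least-squares call, sparse ones through CHOLMOD (cf. the includes above).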
// IF_CHECK( _x.check_indices(false); )
// for(size_t i = 0; i < bAssIndices.numIndices; ++i) {
// REQUIRE(!bAssIndices.indexOpen[i] || contains(AAssIndices.indices, bAssIndices.indices[i]), "Every open index of b must be contained in A.");
// REQUIRE(!contains(_x.indices, bAssIndices.indices[i]), "x and b must not have indices in common.");
// }
// If possible we don't want to reorder A
size_t extraDims = 0;
std::vector<Index> orderA;
std::vector<Index> orderB;
std::vector<Index> orderX;
std::vector<size_t> dimensionsA;
std::vector<size_t> dimensionsB;
std::vector<size_t> dimensionsX;
size_t dimensionsCount = 0;
for(const Index& idx : _a.indices) {
// If possible we don't want to reorder A, so first divide A into those shared with b and x.
for(const Index& idx : _A.indices) {
if(misc::contains(_b.indices, idx)) {
orderA.push_back(idx);
orderB.push_back(idx);
for(size_t i = 0; i < idx.span; ++i) {
dimensionsA.push_back(_a.tensorObjectReadOnly->dimensions[dimensionsCount]);
dimensionsB.push_back(_a.tensorObjectReadOnly->dimensions[dimensionsCount]);
dimensionsCount++;
}
} else {
orderX.push_back(idx);
for(size_t i = 0; i < idx.span; ++i) {
dimensionsX.push_back(_a.tensorObjectReadOnly->dimensions[dimensionsCount++]);
}
}
}
orderA.insert(orderA.end(), orderX.begin(), orderX.end());
dimensionsA.insert(dimensionsA.end(), dimensionsX.begin(), dimensionsX.end());
//Save slots for the possibly needed tmpX and tmpB
std::unique_ptr<internal::IndexedTensor<Tensor>> saveSlotX;
internal::IndexedTensorWritable<Tensor>* usedX;
std::unique_ptr<internal::IndexedTensor<Tensor>> saveSlotB;
internal::IndexedTensorReadOnly<Tensor>* usedB;
// We need tmp objects for A and b, because Lapacke wants to destroy the input
// and we need to cast b if it is sparse as we cannot handle that yet
if (!_a.tensorObjectReadOnly->is_sparse()) {
saveSlotB.reset(new internal::IndexedTensor<Tensor>(
new Tensor(std::move(dimensionsB), Tensor::Representation::Dense, Tensor::Initialisation::None), orderB, true
));
evaluate(std::move(*saveSlotB), std::move(_b));
saveSlotB->tensorObject->use_dense_representation();
usedB = saveSlotB.get();
} else {
usedB = &_b;
}
bool sparseResult = (_a.tensorObjectReadOnly->is_sparse() && usedB->tensorObjectReadOnly->is_sparse());
if (orderX != _x.indices || (_x.tensorObject->is_sparse() != sparseResult)) {
if (sparseResult) {
saveSlotX.reset(new internal::IndexedTensor<Tensor>(
new Tensor(std::move(dimensionsX), Tensor::Representation::Sparse, Tensor::Initialisation::None), orderX, true));
} else {
saveSlotX.reset(new internal::IndexedTensor<Tensor>(
new Tensor(std::move(dimensionsX), Tensor::Representation::Dense, Tensor::Initialisation::None), orderX, true));
}
usedX = saveSlotX.get();
} else {
usedX = &_x;
}
// Assume A is an MxN matrix
const size_t M = usedB->tensorObjectReadOnly->size;
const size_t N = usedX->tensorObjectReadOnly->size;
// So far the dimensions of A and B coincide
orderB = orderA;
internal::IndexedTensor<Tensor> tmpA(new Tensor(std::move(dimensionsA), _a.tensorObjectReadOnly->representation, Tensor::Initialisation::None), orderA, true);
evaluate(std::move(tmpA), std::move(_a));
// Add the second part of indices in the order obtained for X
orderA.insert(orderA.end(), orderX.begin(), orderX.end());
if (tmpA.tensorObjectReadOnly->is_sparse()) {
if (usedB->tensorObjectReadOnly->is_sparse()) {
internal::CholmodSparse::solve_sparse_rhs(
usedX->tensorObject->get_unsanitized_sparse_data(), N,
tmpA.tensorObjectReadOnly->get_unsanitized_sparse_data(), false,
usedB->tensorObjectReadOnly->get_unsanitized_sparse_data(), M);
} else {
internal::CholmodSparse::solve_dense_rhs(
usedX->tensorObject->get_unsanitized_dense_data(), N,