Commit 9534f71f authored by Sebastian Wolf's avatar Sebastian Wolf

Merge branch 'v3' of git:xerus/xerus into v3

parents 8f69feb1 69ec467e
Pipeline #641 failed with stages
in 3 minutes and 41 seconds
......@@ -37,6 +37,7 @@ namespace xerus {
class ALSVariant {
public:
enum Direction { Increasing, Decreasing };
protected:
double solve(const TTOperator *_Ap, TTTensor &_x, const TTTensor &_b, size_t _numHalfSweeps, value_t _convergenceEpsilon, PerformanceData &_perfData = NoPerfData) const;
......
......@@ -25,44 +25,62 @@
using namespace xerus;
static misc::UnitTest als_id("ALS", "identity", [](){
Index k,l,m,n,o,p;
Index k,l,m,n,o,p;
Tensor X({10, 10, 10});
Tensor B = Tensor::random({10, 10, 10});
Tensor I({10,10,10,10,10,10}, [](const std::vector<size_t> &_idx) {
Tensor X({10, 10, 10});
Tensor B = Tensor::random({10, 10, 10});
Tensor I({10,10,10,10,10,10}, [](const std::vector<size_t> &_idx) {
if (_idx[0]==_idx[3] && _idx[1] == _idx[4] && _idx[2] == _idx[5]) {
return 1.0;
} else {
return 0.0;
}
});
X(k^3) = I(k^3,l^3)*B(l^3);
TEST(frob_norm(X(k^3) - B(k^3)) < 1e-13);
TTTensor ttB(B, 0.001);
TTTensor ttX(X, 0.001);
TTOperator ttI(I, 0.001);
X(k^3) = I(k^3,l^3)*B(l^3);
TEST(frob_norm(X(k^3) - B(k^3)) < 1e-13);
TTTensor ttB(B, 0.001);
TTTensor ttX(X, 0.001);
TTOperator ttI(I, 0.001);
ttX(k^3) = ttI(k^3,l^3)*ttB(l^3);
TEST(frob_norm(ttI(k^3,l^3)*ttB(l^3) - ttB(k^3)) < 1e-13);
TEST(frob_norm(ttI(k^3,l^3)*ttB(l^3) - ttB(k^3)) < 1e-13);
TEST(frob_norm(ttI(k^3,l^3)*ttX(l^3) - ttX(k^3)) < 1e-13);
PerformanceData perfdata;
PerformanceData perfdata;
value_t result = ALS_SPD(ttI, ttX, ttB, 0.001, perfdata);
MTEST(result < 0.01, "1 " << result);
MTEST(result < 0.01, "1 " << result);
MTEST(frob_norm(ttX - ttB) < 1e-13 * 1000, "1 " << frob_norm(ttX - ttB));
perfdata.reset();
perfdata.reset();
ttX = TTTensor::random(ttX.dimensions, ttX.ranks());
ttX = TTTensor::random(ttX.dimensions, ttX.ranks());
result = ALS_SPD(ttI, ttX, ttB, 0.001, perfdata);
MTEST(result < 0.01, "2 " << result);
MTEST(result < 0.01, "2 " << result);
MTEST(frob_norm(ttX - ttB) < 1e-9, "2 " << frob_norm(ttX - ttB)); // approx 1e-16 * dim * max_entry
});
// Regression test for the general ALS: build a random TT operator A and a
// random reference solution realX, form b = A*realX, then check that ALS
// recovers realX from a random initial guess x.
// Fix: removed the unused local `std::normal_distribution<double> dist(0, 1);`
// — it was never referenced in this test body.
static misc::UnitTest als_real("ALS", "real", []() {
	Index k,l;
	TTTensor x = TTTensor::random({10, 10, 10, 10, 10}, {3,3,3,3});
	TTTensor realX = TTTensor::random({10, 10, 10, 10, 10}, {3,3,3,3});
	TTOperator A = TTOperator::random({10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, {5,5,5,5});
	// Right-hand side consistent with the known solution realX.
	TTTensor b;
	b(k&0) = A(k/2,l/2)*realX(l&0);
	// The residual reported by ALS must be small, and x must match realX.
	const value_t result = ALS(A, x, b, 1e-6);
	MTEST(result < 0.001, result);
	MTEST(frob_norm(x - realX) < 1e-4, frob_norm(x - realX));
});
static misc::UnitTest als_proj("ALS", "projectionALS", [](){
Index k,l,m,n,o,p;
......
......@@ -40,15 +40,18 @@ namespace xerus {
// local solvers
// -------------------------------------------------------------------------------------------------------------------------
void ALSVariant::lapack_solver(const TensorNetwork &_A, std::vector<Tensor> &_x, const TensorNetwork &_b, const ALSAlgorithmicData &_data) {
void ALSVariant::lapack_solver(const TensorNetwork& _A, std::vector<Tensor>& _x, const TensorNetwork& _b, const ALSAlgorithmicData& _data) {
Tensor A(_A);
Tensor b(_b);
Tensor x;
solve_least_squares(x, A, b, 0);
Index i,j,k,l;
x(i&0) = b(j&0) / A(j/2, i/2);
if (_data.direction == Increasing) {
Tensor U, S;
for (size_t p=0; p+1<_data.ALS.sites; ++p) {
for (size_t p = 0; p+1 < _data.ALS.sites; ++p) {
Tensor U, S;
// calculate_svd(U, S, x, x, 2, _data.targetRank[_data.currIndex+p], EPSILON); TODO
(U(i^2,j), S(j,k), x(k,l&1)) = SVD(x(i^2,l&2), _data.targetRank[_data.currIndex+p]);
_x[p] = std::move(U);
x(j,l&1) = S(j,k) * x(k,l&1);
......@@ -56,8 +59,9 @@ namespace xerus {
_x.back() = std::move(x);
} else {
// direction: decreasing index
Tensor S, Vt;
for (size_t p=_data.ALS.sites-1; p>0; --p) {
for (size_t p = _data.ALS.sites-1; p>0; --p) {
Tensor S, Vt;
// calculate_svd(x, S, Vt, x, x.degree()-1, _data.targetRank[_data.currIndex+p-1], EPSILON); TODO
(x(i&1,j), S(j,k), Vt(k,l&1)) = SVD(x(i&2,l^2), _data.targetRank[_data.currIndex+p-1]);
_x[p] = std::move(Vt);
x(i&1,k) = x(i&1,j) * S(j,k);
......@@ -211,7 +215,7 @@ namespace xerus {
}
void ALSVariant::ALSAlgorithmicData::prepare_stacks() {
const size_t d=x.degree();
const size_t d = x.degree();
Index r1,r2;
Tensor tmpA;
......@@ -381,7 +385,7 @@ namespace xerus {
Index cr1, cr2, cr3, cr4, r1, r2, r3, r4, n1, n2, n3, n4, x;
TensorNetwork ATilde = _data.localOperatorCache.left.back();
if (assumeSPD) {
for (size_t p=0; p<sites; ++p) {
for (size_t p=0; p<sites; ++p) {
ATilde(n1^(p+1), n2, r2, n3^(p+1), n4) = ATilde(n1^(p+1), r1, n3^(p+1)) * _data.A->get_component(_data.currIndex+p)(r1, n2, n4, r2);
}
ATilde(n1^(sites+1), n2, n3^(sites+1), n4) = ATilde(n1^(sites+1), r1, n3^(sites+1)) * _data.localOperatorCache.right.back()(n2, r1, n4);
......@@ -396,6 +400,7 @@ namespace xerus {
return ATilde;
}
TensorNetwork ALSVariant::construct_local_RHS(ALSVariant::ALSAlgorithmicData& _data) const {
Index cr1, cr2, cr3, cr4, r1, r2, r3, r4, n1, n2, n3, n4, x;
TensorNetwork BTilde;
......@@ -448,8 +453,7 @@ namespace xerus {
|| (_data.optimizedRange.second - _data.optimizedRange.first<=sites))
{
// we are done! yay
LOG(ALS, "ALS done, " << _data.energy << " " << _data.lastEnergy << " "
<< std::abs(_data.lastEnergy2-_data.energy) << " " << std::abs(_data.lastEnergy-_data.energy) << " < " << _convergenceEpsilon);
LOG(ALS, "ALS done, " << _data.energy << " " << _data.lastEnergy << " " << std::abs(_data.lastEnergy2-_data.energy) << " " << std::abs(_data.lastEnergy-_data.energy) << " < " << _convergenceEpsilon);
if (_data.cannonicalizeAtTheEnd && preserveCorePosition) {
_data.x.move_core(_data.corePosAtTheEnd, true);
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment