Commit 080e222b authored by Sebastian Wolf's avatar Sebastian Wolf

Work on UQ ADF

parent f4ce1fc8
Pipeline #674 failed with stages
in 3 minutes and 43 seconds
......@@ -58,6 +58,7 @@
#include "xerus/algorithms/cg.h"
#include "xerus/algorithms/decompositionAls.h"
#include "xerus/algorithms/adf.h"
#include "xerus/algorithms/uqAdf.h"
#include "xerus/algorithms/iht.h"
#include "xerus/algorithms/crossApproximation.h"
#include "xerus/algorithms/largestEntry.h"
......
......@@ -235,7 +235,7 @@ namespace xerus {
double minimalResidualNormDecrease; // The minimal relative decrease of the residual per step ( i.e. (lastResidual-residual)/lastResidual ). If the avg. of the last three steps is smaller than this value, the algorithm stops.
/// fully defining constructor. alternatively ALSVariants can be created by copying a predefined variant and modifying it
ADFVariant(const size_t _maxIteration, const double _targetResidual, const double _minimalResidualDecrease)
ADFVariant(const size_t _maxIteration, const double _targetResidual, const double _minimalResidualDecrease)
: maxIterations(_maxIteration), targetResidualNorm(_targetResidual), minimalResidualNormDecrease(_minimalResidualDecrease) { }
/**
......
// Xerus - A General Purpose Tensor Library
// Copyright (C) 2014-2017 Benjamin Huber and Sebastian Wolf.
//
// Xerus is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// Xerus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Xerus. If not, see <http://www.gnu.org/licenses/>.
//
// For further information on Xerus visit https://libXerus.org
// or contact us at contact@libXerus.org.
/**
* @file
* @brief Header file for the IHT algorithm and its variants.
*/
#pragma once
#include "../ttNetwork.h"
namespace xerus {
/**
 * @brief Computes a randomized TT approximation of the sparse Tensor @a _x via successive random sketching.
 * @param _x the (sparse) input tensor to be decomposed.
 * @param _ranks the target TT ranks. NOTE(review): currently unused — the ranks are implicitly
 *        determined by the sketch; presumably they should bound the RQ factorization. TODO confirm.
 * @return the TT decomposition of @a _x.
 */
TTTensor randomTTSVD(const Tensor& _x, const std::vector<size_t>& _ranks) {
	std::random_device rd;
	std::mt19937 rnd(rd());
	std::normal_distribution<double> dist(0, 1);
	
	const size_t d = _x.degree();
	TTTensor u(d);
	Tensor b = _x;
	
	// Fixed: original read `for(long j = d; j--; j >= 2)`, which used the decrement as the
	// loop condition (iterating j = d-1 .. 0) and the comparison as a no-op expression.
	for(long j = d; j >= 2; --j) {
		// Random sparse sketch over the leading dimensions of b.
		// Fixed: `_b` was undeclared (should be `b`), and mixing cbegin()/begin() gives two
		// different iterator types, which the vector range constructor does not accept.
		// NOTE(review): the fixed prefix length (d-1) looks suspicious once b has been
		// contracted with Q in earlier sweeps — possibly this should depend on j; confirm.
		std::vector<size_t> gDims(b.dimensions.cbegin(), b.dimensions.cbegin()+(d-1));
		Tensor g(gDims, Tensor::Representation::Sparse);
		
		// Fill the sketch with Gaussian entries only at positions where b is non-zero
		// (with the last index dropped), keeping g as sparse as b.
		const auto& data = b.get_unsanitized_sparse_data();
		for(const auto& entry : data) {
			auto pos = Tensor::position_to_multiIndex(entry.first, b.dimensions);
			pos.pop_back();
			g[pos] = dist(rnd);
		}
		
		// Sketch b, then orthogonalize: a = g*b, a = R*Q.
		Tensor a;
		contract(a, g, false, b, false, j-1);
		
		Tensor R, Q;
		calculate_rq(R, Q, a, 1);
		u.set_component(j, Q);
		
		// Project Q out of b; in the first step only one index is shared with Q.
		if(j == d) {
			contract(b, b, false, Q, true, 1);
		} else {
			contract(b, b, false, Q, true, 2);
		}
	}
	
	u.set_component(1, b);
	
	// Fixed: the function declares a TTTensor return value but had no return statement (UB).
	return u;
}
}
// // Xerus - A General Purpose Tensor Library
// // Copyright (C) 2014-2016 Benjamin Huber and Sebastian Wolf.
// //
// // Xerus is free software: you can redistribute it and/or modify
// // it under the terms of the GNU Affero General Public License as published
// // by the Free Software Foundation, either version 3 of the License,
// // or (at your option) any later version.
// //
// // Xerus is distributed in the hope that it will be useful,
// // but WITHOUT ANY WARRANTY; without even the implied warranty of
// // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// // GNU Affero General Public License for more details.
// //
// // You should have received a copy of the GNU Affero General Public License
// // along with Xerus. If not, see <http://www.gnu.org/licenses/>.
// //
// // For further information on Xerus visit https://libXerus.org
// // or contact us at contact@libXerus.org.
// Xerus - A General Purpose Tensor Library
// Copyright (C) 2014-2016 Benjamin Huber and Sebastian Wolf.
//
// /**
// * @file
// * @brief Header file for the ADF algorithm and its variants.
// */
// Xerus is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// #pragma once
// Xerus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// #include "../index.h"
// #include "../ttNetwork.h"
// #include "../performanceData.h"
// #include "../measurments.h"
//
// namespace xerus {
//
//
//
// class UQADF {
// ///@brief Indices for all internal functions.
// const Index r1, r2, i1;
//
// ///@brief Reference to the current solution (external ownership)
// TTTensor& x;
//
// ///@brief Degree of the solution.
// const size_t degree;
//
// ///@brief Maximally allowed ranks.
// const std::vector<size_t> maxRanks;
//
// ///@brief Number of measurments (i.e. measurments.size())
// const size_t numMeasurments;
//
// ///@brief The two norm of the measured values
// const value_t normMeasuredValues;
//
// ///@brief Maximal allowed number of iterations (one iteration = one sweep)
// const size_t maxIterations;
//
// ///@brief The target residual norm at which the algorithm shall stop.
// const double targetResidualNorm;
//
// ///@brief Minimal relative decrease of the residual norm ( (oldRes-newRes)/oldRes ) until either the ranks are increased (if allowed) or the algorithm stops.
// const double minimalResidualNormDecrease;
//
// ///@brief The current iteration.
// size_t iteration;
//
// ///@brief Current residual norm. Updated at the beginning of each iteration.
// double residualNorm;
//
// ///@brief The residual norm of the last iteration.
// double lastResidualNorm;
//
// ///@brief The current residual, saved as vector (instead of a order one tensor).
// std::vector<value_t> residual;
//
// ///@brief The current projected Gradient component. That is E(A^T(Ax-b))
// Tensor projectedGradientComponent;
//
// ///@brief Ownership holder for a (degree+2)*numMeasurments array of Tensor pointers. (Not used directly)
// std::unique_ptr<Tensor*[]> forwardStackMem;
//
// /** @brief Array [numMeasurments][degree]. For positions smaller than the current corePosition and for each measurment, this array contains the pre-computed
// * contraction of the first _ component tensors and the first _ components of the measurment operator. These tensors are deduplicated in the sense that for each unqiue
// * part of the position only one tensor is actually stored, which is why the is an array of pointers. The Tensors at the current corePosition are used as
// * scatch space. For convinience the underlying array (forwardStackMem) is larger, wherefore also the positions -1 and degree are allow, all poining to a {1} tensor
// * containing 1 as only entry. Note that position degree-1 must not be used.
// **/
// Tensor* const * const forwardStack;
//
// /// @brief Ownership holder for the unqiue Tensors referenced in forwardStack.
// std::unique_ptr<Tensor[]> forwardStackSaveSlots;
//
// /// @brief Vector containing for each corePosition a vector of the smallest ids of each group of unique forwardStack entries.
// std::vector<std::vector<size_t>> forwardUpdates;
//
//
// ///@brief Ownership holder for a (degree+2)*numMeasurments array of Tensor pointers. (Not used directly)
// std::unique_ptr<Tensor*[]> backwardStackMem;
//
// /** @brief Array [numMeasurments][degree]. For positions larger than the current corePosition and for each measurment, this array contains the pre-computed
// * contraction of the last _ component tensors and the last _ components of the measurment operator. These tensors are deduplicated in the sense that for each unqiue
// * part of the position only one tensor is actually stored, which is why the is an array of pointers. The Tensors at the current corePosition are used as
// * scratch space. For convinience the underlying array (forwardStackMem) is larger, wherefore also the positions -1 and degree are allow, all poining to a {1} tensor
// * containing 1 as only entry. Note that position zero must not be used.
// **/
// Tensor* const * const backwardStack;
//
// /// @brief Ownership holder for the unqiue Tensors referenced in backwardStack.
// std::unique_ptr<Tensor[]> backwardStackSaveSlots;
//
// /// @brief Vector containing for each corePosition a vector of the smallest ids of each group of unique backwardStack entries.
// std::vector<std::vector<size_t>> backwardUpdates;
//
// /// @brief: Norm of each rank one measurment operator
// std::unique_ptr<double[]> measurmentNorms;
//
// ///@brief: Reference to the performanceData object (external ownership)
// PerformanceData& perfData;
//
// void solve(TTTensor _x, const std::vector<std::vector<double>> _randomVariables, std::vector<Tensor> _solutions);
// };
//
// }
// You should have received a copy of the GNU Affero General Public License
// along with Xerus. If not, see <http://www.gnu.org/licenses/>.
//
// For further information on Xerus visit https://libXerus.org
// or contact us at contact@libXerus.org.
/**
 * @file
 * @brief Header file for the UQ ADF algorithm and its variants.
 */
#pragma once
#include "../index.h"
#include "../ttNetwork.h"
#include "../performanceData.h"
#include "../measurments.h"
namespace xerus {
	/**
	 * @brief Uncertainty-quantification variant of the ADF algorithm (work in progress).
	 * @param _x the TTTensor to be reconstructed; used as initial guess and output.
	 * @param _randomVariables per-sample realizations of the random variables; one inner
	 *        vector per measurement sample (must have the same size as @a _solutions).
	 * @param _solutions the measured solution Tensors, one per sample.
	 * @param _polyDegree degree of the orthogonal polynomial chaos basis used to map each
	 *        random variable realization to a basis-evaluation vector.
	 */
	void uq_adf(TTTensor& _x, const std::vector<std::vector<double>>& _randomVariables, const std::vector<Tensor>& _solutions, const size_t _polyDegree);
}
......@@ -43,17 +43,19 @@ namespace xerus {
// NOTE these two functions are used in the template functions of Tensor so they have to be declared before the class..
class Tensor;
/**
* @brief Low-level contraction between Tensors.
* @param _result Output for the result of the contraction.
* @param _lhs left hand side of the contraction.
* @param _lhsTrans Flags whether the LHS should be transposed (in the matrifications sense).
* @param _rhs right hand side of the contraction.
* @param _rhsTrans Flags whether the RHS should be transposed (in the matrifications sense).
* @param _numIndices number of indices that shall be contracted.
*/
void contract(Tensor& _result, const Tensor& _lhs, const bool _lhsTrans, const Tensor& _rhs, const bool _rhsTrans, const size_t _numIndices);
Tensor contract(const Tensor& _lhs, const bool _lhsTrans, const Tensor& _rhs, const bool _rhsTrans, const size_t _numIndices);
/**
* @brief Low-level contraction between Tensors.
* @param _result Output for the result of the contraction.
* @param _lhs left hand side of the contraction.
* @param _lhsTrans Flags whether the LHS should be transposed (in the matrifications sense).
* @param _rhs right hand side of the contraction.
* @param _rhsTrans Flags whether the RHS should be transposed (in the matrifications sense).
* @param _numIndices number of indices that shall be contracted.
*/
void contract(Tensor& _result, const Tensor& _lhs, const bool _lhsTrans, const Tensor& _rhs, const bool _rhsTrans, const size_t _numIndices);
Tensor contract(const Tensor& _lhs, const bool _lhsTrans, const Tensor& _rhs, const bool _rhsTrans, const size_t _numIndices);
/**
* @brief: Performs a simple reshuffle. Much less powerfull then a full evaluate, but more efficient.
......@@ -850,6 +852,20 @@ namespace xerus {
/*- - - - - - - - - - - - - - - - - - - - - - - - - - External functions - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/**
* @brief Low-level contraction between Tensors.
* @param _result Output for the result of the contraction.
* @param _lhs left hand side of the contraction.
* @param _rhs right hand side of the contraction.
* @param _numIndices number of indices that shall be contracted.
*/
XERUS_force_inline void contract(Tensor& _result, const Tensor& _lhs, const Tensor& _rhs, const size_t _numIndices) {
	// Convenience forwarder: contraction without transposition of either operand.
	contract(_result, _lhs, false, _rhs, false, _numIndices);
}
/**
 * @brief Low-level contraction between Tensors, with neither operand transposed.
 * @param _lhs left hand side of the contraction.
 * @param _rhs right hand side of the contraction.
 * @param _numIndices number of indices that shall be contracted.
 * @return the result of the contraction.
 */
XERUS_force_inline Tensor contract(const Tensor& _lhs, const Tensor& _rhs, const size_t _numIndices) {
	return contract(_lhs, false, _rhs, false, _numIndices);
}
/*- - - - - - - - - - - - - - - - - - - - - - - - - - Basic arithmetics - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
......
// // Xerus - A General Purpose Tensor Library
// // Copyright (C) 2014-2016 Benjamin Huber and Sebastian Wolf.
// //
// // Xerus is free software: you can redistribute it and/or modify
// // it under the terms of the GNU Affero General Public License as published
// // by the Free Software Foundation, either version 3 of the License,
// // or (at your option) any later version.
// //
// // Xerus is distributed in the hope that it will be useful,
// // but WITHOUT ANY WARRANTY; without even the implied warranty of
// // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// // GNU Affero General Public License for more details.
// //
// // You should have received a copy of the GNU Affero General Public License
// // along with Xerus. If not, see <http://www.gnu.org/licenses/>.
// //
// // For further information on Xerus visit https://libXerus.org
// // or contact us at contact@libXerus.org.
// Xerus - A General Purpose Tensor Library
// Copyright (C) 2014-2016 Benjamin Huber and Sebastian Wolf.
//
// /**
// * @file
// * @brief Implementation of the ADF variants.
// */
// Xerus is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// #include <xerus/algorithms/uqAdf.h>
//
// #include <xerus/indexedTensorMoveable.h>
// #include <xerus/misc/basicArraySupport.h>
// #include <xerus/misc/internal.h>
// Xerus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// #ifdef _OPENMP
// #include <omp.h>
// #endif
//
// namespace xerus {
// void UQADF::solve(TTTensor _x, const std::vector<std::vector<double>> _randomVariables, std::vector<Tensor> _solutions) {
// double resDec1 = 0.0, resDec2 = 0.0, resDec3 = 0.0;
//
// for(; maxIterations == 0 || iteration < maxIterations; ++iteration) {
//
// // Move core back to position zero
// x.move_core(0, true);
//
// // Rebuild backwardStack
// for(size_t corePosition = x.degree()-1; corePosition > 0; --corePosition) {
// update_backward_stack(corePosition, x.get_component(corePosition));
// }
//
// calculate_residual(0);
//
// lastResidualNorm = residualNorm;
// double residualNormSqr = 0;
//
// #pragma omp parallel for schedule(static) reduction(+:residualNormSqr)
// for(size_t i = 0; i < numMeasurments; ++i) {
// residualNormSqr += misc::sqr(residual[i]);
// }
// residualNorm = std::sqrt(residualNormSqr)/normMeasuredValues;
//
// perfData.add(iteration, residualNorm, x, 0);
//
// // Check for termination criteria
// double resDec4 = resDec3; resDec3 = resDec2; resDec2 = resDec1;
// resDec1 = residualNorm/lastResidualNorm;
// // LOG(wup, resDec1*resDec2*resDec3*resDec4);
// if(residualNorm < targetResidualNorm || resDec1*resDec2*resDec3*resDec4 > misc::pow(minimalResidualNormDecrease, 4)) { break; }
//
//
// // Sweep from the first to the last component
// for(size_t corePosition = 0; corePosition < degree; ++corePosition) {
// if(corePosition > 0) { // For corePosition 0 this calculation is allready done in the calculation of the residual.
// calculate_residual(corePosition);
// }
//
// calculate_projected_gradient(corePosition);
//
// const std::vector<value_t> normAProjGrad = calculate_slicewise_norm_A_projGrad(corePosition);
//
// update_x(normAProjGrad, corePosition);
//
// // If we have not yet reached the end of the sweep we need to take care of the core and update our stacks
// if(corePosition+1 < degree) {
// x.move_core(corePosition+1, true);
// update_forward_stack(corePosition, x.get_component(corePosition));
// }
// }
// }
// }
//
// } // namespace xerus
// You should have received a copy of the GNU Affero General Public License
// along with Xerus. If not, see <http://www.gnu.org/licenses/>.
//
// For further information on Xerus visit https://libXerus.org
// or contact us at contact@libXerus.org.
/**
 * @file
 * @brief Implementation of the UQ ADF algorithm.
 */
#include <xerus/algorithms/uqAdf.h>
#include <xerus/indexedTensorMoveable.h>
#include <xerus/misc/basicArraySupport.h>
#include <xerus/misc/simpleNumerics.h>
#include <xerus/misc/internal.h>
#ifdef _OPENMP
#include <omp.h>
#endif
namespace xerus {
///@brief Evaluates the orthogonal polynomial basis of degree @a _polyDegree (constant weight
///       on [-1, 1]) at the realization @a _v and returns the evaluations as an order-one Tensor.
Tensor randVar_to_position(const double _v, const size_t _polyDegree) {
	// Basis orthogonal w.r.t. the constant weight function on [-1, 1].
	const auto basis = xerus::misc::Polynomial::build_orthogonal_base(_polyDegree, [](const double){return 1.0;}, -1., 1.);
	
	Tensor position({basis.size()});
	size_t k = 0;
	for (const auto& poly : basis) {
		position[k++] = poly(_v);
	}
	return position;
}
/**
 * @brief Uncertainty-quantification variant of the ADF algorithm — WORK IN PROGRESS.
 *        Currently only (partially) builds the left/right contraction stacks; no actual
 *        reconstruction sweep is performed yet.
 * @param _x the TTTensor to be reconstructed; used as initial guess and output.
 * @param _randomVariables per-sample realizations of the random variables (one inner vector per sample).
 * @param _solutions the measured solution Tensors, one per sample; must match _randomVariables in size.
 * @param _polyDegree degree of the polynomial basis passed to randVar_to_position.
 */
void uq_adf(TTTensor& _x, const std::vector<std::vector<double>>& _randomVariables, const std::vector<Tensor>& _solutions, const size_t _polyDegree) {
REQUIRE(_randomVariables.size() == _solutions.size(), "ERROR");
// Number of measurement samples and order of the solution tensor.
const size_t N = _randomVariables.size();
const size_t d = _x.degree();
// Per-position, per-sample partial contractions of _x with the sample's basis vectors.
std::vector<std::vector<Tensor>> leftStack(d, std::vector<Tensor>(N));
std::vector<std::vector<Tensor>> rightStack(d, std::vector<Tensor>(N));
// NOTE(review): Ax is never used below — leftover scaffolding from the classic ADF? Confirm.
std::vector<Tensor> Ax;
// Rebuild right Stack
// NOTE(review): rightStack[corePosition-2][j] is read here before any entry of rightStack is
// ever written — a seeding step (e.g. with the last component) appears to be missing; confirm.
// Also note the loop stops at corePosition > 2 while the left sweep below starts at 1.
for(size_t corePosition = _x.degree()-1; corePosition > 2; --corePosition) {
Tensor tmp;
for(size_t j = 0; j < N; ++j) {
contract(tmp, randVar_to_position(_randomVariables[j][corePosition] ,_polyDegree), _x.get_component(corePosition-1), 1);
contract(rightStack[corePosition-1][j], rightStack[corePosition-2][j], tmp, 1);
}
}
// NOTE(review): for corePosition == 1 this reads leftStack[corePosition-2], i.e.
// leftStack[size_t(-1)] — an out-of-bounds access. A seeding of position 0 (or a loop start
// at 2) seems to be missing; confirm against the intended ADF stack layout.
for(size_t corePosition = 1; corePosition < _x.degree(); ++corePosition) {
Tensor tmp;
for(size_t j = 0; j < N; ++j) {
contract(tmp, _x.get_component(corePosition-1), randVar_to_position(_randomVariables[j][corePosition] ,_polyDegree), 1);
contract(leftStack[corePosition-1][j], leftStack[corePosition-2][j], tmp, 1);
}
}
// NOTE(review): debug leftover — remove once the implementation is completed.
LOG(bla, "Hallo Welt");
}
} // namespace xerus
......@@ -1346,6 +1346,7 @@ namespace xerus {
contract(result, _lhs, _lhsTrans, _rhs, _rhsTrans, _numIndices);
return result;
}
XERUS_force_inline std::tuple<size_t, size_t, size_t> calculate_factorization_sizes(const Tensor& _input, const size_t _splitPos) {
REQUIRE(_splitPos <= _input.degree(), "Split position must be in range.");
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment