Commit 97e1aa0e authored by Michael Goette's avatar Michael Goette

Constructors for HTTensors are now consistent (HTOperators still open)

parent b9657492
Pipeline #917 failed with stages
in 2 minutes and 50 seconds
......@@ -36,6 +36,11 @@
#include "indexedTensorMoveable.h"
#include "indexedTensorList.h"
#include <xerus/misc/internal.h>
namespace xerus {
/**
* @brief Specialized TensorNetwork class used to represent HTTensor and HToperators.
......@@ -129,49 +134,63 @@ namespace xerus {
* @param _rnd the random engine to be passed to the constructor of the component tensors.
* @param _dist the random distribution to be passed to the constructor of the component tensors.
*/
// template<class distribution=std::normal_distribution<value_t>, class generator=std::mt19937_64>
// static HTNetwork XERUS_warn_unused random(std::vector<size_t> _dimensions, const std::vector<size_t> &_ranks, distribution& _dist=xerus::misc::defaultNormalDistribution, generator& _rnd=xerus::misc::randomEngine) {
// const size_t numComponents = _dimensions.size()/N;
// XERUS_REQUIRE(_dimensions.size()%N==0, "Illegal number of dimensions for TTOperator.");
// XERUS_REQUIRE(_ranks.size()+1 == numComponents,"Non-matching amount of ranks given to TTNetwork::random.");
// XERUS_REQUIRE(!misc::contains(_dimensions, size_t(0)), "Trying to construct a TTTensor with dimension 0 is not possible.");
// XERUS_REQUIRE(!misc::contains(_ranks, size_t(0)), "Trying to construct random TTTensor with rank 0 is illegal.");
//
//
//
// TTNetwork result(_dimensions.size());
// const std::vector<size_t> targetRank = reduce_to_maximal_ranks(_ranks, _dimensions);
//
// for(size_t i = 0; i < numComponents; ++i) {
// const size_t leftRank = i==0 ? 1 : targetRank[i-1];
// const size_t rightRank = (i==numComponents-1) ? 1 : targetRank[i];
//
// if(isOperator) {
// const auto rndComp = Tensor::random({leftRank, _dimensions[i], _dimensions[numComponents+i], rightRank}, _dist, _rnd);
// result.set_component(i, rndComp);
// } else {
// const auto rndComp = Tensor::random({leftRank, _dimensions[i], rightRank}, _dist, _rnd);
// result.set_component(i, rndComp);
// }
// }
// result.move_core(0);
// return result;
// }
/**
* @brief Random constructs a TTNetwork with the given dimensions and ranks limited by the given rank.
template<class distribution=std::normal_distribution<value_t>, class generator=std::mt19937_64>
static HTNetwork XERUS_warn_unused random(std::vector<size_t> _dimensions, const std::vector<size_t> &_ranks, distribution& _dist=xerus::misc::defaultNormalDistribution, generator& _rnd=xerus::misc::randomEngine) {
const size_t numIntComp = static_cast<size_t>(std::pow(2,std::ceil(std::log2(static_cast<double>(_dimensions.size()/N ))))) - 1;
const size_t numComponents = numIntComp + _dimensions.size()/N;
const size_t numOfLeaves = _dimensions.size();
XERUS_REQUIRE(numOfLeaves%N==0, "Illegal number of dimensions/Leaves for HTOperator.");
XERUS_REQUIRE(_ranks.size()+1 == numComponents,"Non-matching amount of ranks given to HTNetwork::random.");
XERUS_REQUIRE(numIntComp >= 0,"No internal Components! ");
XERUS_REQUIRE(!misc::contains(_dimensions, size_t(0)), "Trying to construct a HTTensor with dimension 0 is not possible.");
XERUS_REQUIRE(!misc::contains(_ranks, size_t(0)), "Trying to construct random HTTensor with rank 0 is illegal.");
HTNetwork result(_dimensions.size());
//const std::vector<size_t> targetRank = reduce_to_maximal_ranks(_ranks, _dimensions);
const std::vector<size_t> targetRank = std::move(_ranks);
for(size_t i = 0; i < numComponents; ++i) {
//Create inner components
if (i < numIntComp){
const size_t child1Rank = 2 * i >= numComponents - 1 ? 1 : targetRank[2 * i];
const size_t child2Rank = 2 * i + 1 >= numComponents - 1 ? 1 : targetRank[2 * i + 1];
const size_t parentRank = i == 0 ? 1 : targetRank[i - 1];
const auto rndComp = Tensor::random({parentRank, child1Rank, child2Rank}, _dist, _rnd);
result.set_component(i, rndComp);
}
else {
const size_t parentRank = targetRank[i - 1];
if(isOperator) {
const auto rndComp = Tensor::random({parentRank, _dimensions[i - numIntComp], _dimensions[numOfLeaves + i - numIntComp]}, _dist, _rnd);
result.set_component(i, rndComp);
} else {
const auto rndComp = Tensor::random({parentRank, _dimensions[i - numIntComp]}, _dist, _rnd);
LOG(info,"dimension random = " << rndComp.dimensions);
result.set_component(i, rndComp);
}
}
}
//result.move_core(0);
return result;
}
/**
* @brief Random constructs a HTNetwork with the given dimensions and ranks limited by the given rank.
* @details The entries of the componend tensors are sampled independendly using the provided random generator and distribution.
* @param _dimensions the dimensions of the to be created TTNetwork.
* @param _rank the maximal allowed rank.
* @param _rnd the random engine to be passed to the constructor of the component tensors.
* @param _dist the random distribution to be passed to the constructor of the component tensors.
*/
// template<class distribution=std::normal_distribution<value_t>, class generator=std::mt19937_64>
// static TTNetwork XERUS_warn_unused random(const std::vector<size_t>& _dimensions, const size_t _rank, distribution& _dist=xerus::misc::defaultNormalDistribution, generator& _rnd=xerus::misc::randomEngine) {
// return TTNetwork::random(_dimensions, std::vector<size_t>(_dimensions.size()/N-1, _rank), _dist, _rnd);
// }
//
template<class distribution=std::normal_distribution<value_t>, class generator=std::mt19937_64>
static HTNetwork XERUS_warn_unused random(const std::vector<size_t>& _dimensions, const size_t _rank, distribution& _dist=xerus::misc::defaultNormalDistribution, generator& _rnd=xerus::misc::randomEngine) {
	// Build a uniform rank vector, one entry per edge of the tree, and delegate to the general overload.
	const size_t numLeaves = _dimensions.size()/N;
	// Internal nodes of the full binary tree above the leaves: 2^ceil(log2(#leaves)) - 1.
	const size_t numInternal = static_cast<size_t>(std::pow(2, std::ceil(std::log2(static_cast<double>(numLeaves))))) - 1;
	// Edges = components - 1 = (numInternal + numLeaves) - 1.
	const size_t numEdges = numInternal + numLeaves - 1;
	return HTNetwork::random(_dimensions, std::vector<size_t>(numEdges, _rank), _dist, _rnd);
}
/**
* @brief Random constructs a TTNetwork with the given dimensions and ranks.
......@@ -283,7 +302,7 @@ namespace xerus {
* @param _dimensions the dimensions used to calculate the maximal ranks.
* @return the reduced ranks.
*/
// static std::vector<size_t> reduce_to_maximal_ranks(std::vector<size_t> _ranks, const std::vector<size_t>& _dimensions);
//static std::vector<size_t> reduce_to_maximal_ranks(std::vector<size_t> _ranks, const std::vector<size_t>& _dimensions);
/**
* @brief calculates the number of degrees of freedom of the manifold of fixed tt-rank @a _ranks with the given @a _dimensions
......@@ -423,7 +442,7 @@ namespace xerus {
* @details this is particularly useful after constructing an own TT tensor with set_component calls
* as these will assume that all orthogonalities are destroyed
*/
// void assume_core_position(const size_t _pos);
void assume_core_position(const size_t _pos);
/**
......
......@@ -48,7 +48,7 @@ namespace xerus {
template<bool isOperator>
HTNetwork<isOperator>::HTNetwork(const Tensor& _tensor, const double _eps, const size_t _maxRank) :
HTNetwork(_tensor, _eps, std::vector<size_t>(_tensor.degree() == 0 ? 0 : _tensor.degree()/N, _maxRank)) {}
HTNetwork(_tensor, _eps, std::vector<size_t>(_tensor.degree() == 0 ? 0 : (static_cast<size_t>(std::pow(2,std::ceil(std::log2(static_cast<double>(_tensor.degree()/N ))))) - 2 + _tensor.degree()/N) , _maxRank)) {}
template<bool isOperator>
......@@ -57,7 +57,7 @@ namespace xerus {
template<bool isOperator>
HTNetwork<isOperator>::HTNetwork(Tensor::DimensionTuple _dimensions) : TensorNetwork(ZeroNode::None), canonicalized(true), corePosition(0) {
dimensions = std::move(_dimensions);
REQUIRE(dimensions.size()%N==0, "Illegal degree for TTOperator.");
REQUIRE(dimensions.size()%N==0, "Illegal degree for HTOperator.");
REQUIRE(!misc::contains(dimensions, size_t(0)), "Zero is no valid dimension.");
//Number of Leafs
const size_t numLeaves = dimensions.size()/N;
......@@ -69,11 +69,6 @@ namespace xerus {
// set number of components
numberOfComponents = numLeaves + numIntCom; //TODO rethink this
LOG(info,"Number of Leaves:" << numLeaves);
LOG(info,"Number of Full Leaves:" << numFullLeaves);
LOG(info,"Number of Internal Components:" << numIntCom);
LOG(info,"Number of Components:" << numberOfComponents);
if (numLeaves == 0) {
nodes.emplace_back(std::make_unique<Tensor>());
return;
......@@ -113,10 +108,7 @@ namespace xerus {
//Loop for the leafs
for (size_t i = 0; i < numLeaves; ++i) {
neighbors.clear();
// First come the external dimension then comes the internal dimension for the leaf
// -1 one is a placeholder for external nodes
neighbors.emplace_back(-1, i, dimensions[i], true);
if(isOperator) { neighbors.emplace_back(-1, numLeaves + i, dimensions[numLeaves + i], true); }
//The leafs are the last nodes, numIntCom .. numIntCom + numFullLeaves
//A parent node is calculated by ((numIntCom + i)+1) / 2 -1 , where i is the linear position in the node vector
......@@ -124,6 +116,11 @@ namespace xerus {
//This means from a leafs perspective it is mod 2, which translate wrt i to i%2 + 1
neighbors.emplace_back(((numIntCom + i) + 1) / 2 - 1, i%2 + 1, 1, false);
// First come the external dimension then comes the internal dimension for the leaf
// -1 one is a placeholder for external nodes
neighbors.emplace_back(-1, i, dimensions[i], true);
if(isOperator) { neighbors.emplace_back(-1, numLeaves + i, dimensions[numLeaves + i], true); }
if(!isOperator) {
nodes.emplace_back( std::make_unique<Tensor>(Tensor::dirac({1, dimensions[i]}, 0)), std::move(neighbors) );
} else {
......@@ -150,7 +147,7 @@ namespace xerus {
HTNetwork<isOperator>::HTNetwork(const Tensor& _tensor, const double _eps, const TensorNetwork::RankTuple& _maxRanks): HTNetwork(_tensor.degree()) {
REQUIRE(_tensor.degree()%N==0, "Number of indices must be even for HTOperator");
REQUIRE(_eps >= 0 && _eps < 1, "_eps must be positive and smaller than one. " << _eps << " was given.");
//REQUIRE(_maxRanks.size() == num_ranks(), "We need " << num_ranks() <<" ranks but " << _maxRanks.size() << " where given");
REQUIRE(_maxRanks.size() == num_ranks(), "We need " << num_ranks() <<" ranks but " << _maxRanks.size() << " where given");
REQUIRE(!misc::contains(_maxRanks, size_t(0)), "Maximal ranks must be strictly positive. Here: " << _maxRanks);
const size_t numExternalComponent = degree()/N;
......@@ -177,53 +174,58 @@ namespace xerus {
remains = _tensor;
}
// Add ghost dimensions used in the nodes
// std::vector<size_t> extDimensions;
// extDimensions.reserve(remains.degree()+2);
// extDimensions.emplace_back(1);
// extDimensions.insert(extDimensions.end(), remains.dimensions.begin(), remains.dimensions.end());
// extDimensions.emplace_back(1);
// remains.reinterpret_dimensions(extDimensions);
Tensor singularValues, newNode;
//for the leaves TODO add operator functionality
for(size_t pos = numberOfComponents - numExternalComponent,i = 0; pos < numberOfComponents; ++pos, ++i) {
std::vector<size_t> ithmode(_tensor.degree());
for(size_t j = 0; j < numExternalComponent; ++j) {
ithmode[j] = j == 0 ? i : (j == i ? 0 : j);
}
LOG(info,"i th Mode = " << ithmode);
LOG(info,"degree remain = " << remains.degree());
LOG(info,"degree remain dim = " << remains.dimensions);
xerus::reshuffle(remains, remains, ithmode);
LOG(info,"degree remain = " << remains.degree());
LOG(info,"degree remain dim = " << remains.dimensions);
calculate_svd(newNode, singularValues, remains, remains, 1, _maxRanks[i], _eps);
LOG(info,"degree remain = " << remains.degree());
LOG(info,"degree remain dim = " << remains.dimensions);
LOG(info,"singularValues = " << singularValues.degree());
LOG(info,"singularValues dim = " << singularValues.dimensions);
LOG(info,"newNode = " << newNode.degree());
LOG(info,"newNode dim = " << newNode.dimensions);
}
xerus::reshuffle(remains, remains, ithmode);
LOG(info,"degree remain = " << remains.degree());
LOG(info,"degree remain dim = " << remains.dimensions);
calculate_svd(newNode, singularValues, remains, remains, 1, _maxRanks[pos - 1], _eps);
xerus::reshuffle(newNode, newNode, {1,0});
set_component(pos, std::move(newNode));
newNode.reset();
xerus::contract(remains, singularValues, false, remains, false, 1);
LOG(info,"degree remain = " << remains.degree());
LOG(info,"degree remain dim = " << remains.dimensions);
xerus::reshuffle(remains, remains, ithmode);
}
//for the internal components
size_t lvl = static_cast<size_t>(std::floor(std::log2(static_cast<double>(numberOfComponents - numExternalComponent))));
//add dummy dimensions to remainder
Tensor::DimensionTuple wdummydim(static_cast<size_t>(std::pow(2,lvl + 1)));
for (size_t i = 0; i < std::pow(2,lvl + 1); i++){
wdummydim[i] = i < numExternalComponent ? remains.dimensions[i] : 1;
}
remains.reinterpret_dimensions(wdummydim);
for(; lvl > 0; --lvl) {
for (size_t pos = 0; pos < std::pow(2,lvl); ++pos){
std::vector<size_t> ithmode(remains.degree());
std::vector<size_t> ithmodeinv(remains.degree() - 1);
size_t lengthrem = remains.degree();
for(size_t j = 0; j < lengthrem ; ++j) {
ithmode[j] = j == pos ? 0 : (j== pos + 1 ? 1 : j < pos ? j + 2 : j );
}
for(size_t j = 0; j < lengthrem - 1; ++j) {
ithmodeinv[j] = j == 0 ? pos : (j <= pos ? j - 1 : j);
}
xerus::reshuffle(remains, remains, ithmode);
calculate_svd(newNode, singularValues, remains, remains, 2, _maxRanks[static_cast<size_t>(std::pow(2,lvl)) + pos - 2], _eps); // TODO fix maxRanks
xerus::reshuffle(newNode, newNode, {1,2,0}); // first parent then children
set_component(static_cast<size_t>(std::pow(2,lvl)) + pos - 1, std::move(newNode));
newNode.reset();
xerus::contract(remains, singularValues, false, remains, false, 1);
xerus::reshuffle(remains, remains, ithmodeinv);
//set_component(0, remains);
//assume_core_position(0);
}
}
Tensor::DimensionTuple wdummydimroot({remains.dimensions[0], remains.dimensions[1], 1});
remains.reinterpret_dimensions(wdummydimroot);
xerus::reshuffle(remains, remains, {1,2,0}); // first parent then children
set_component(0, remains);
assume_core_position(0);
}
//
//
......@@ -430,13 +432,13 @@ namespace xerus {
// Number of ranks = number of edges of the tree = numberOfComponents - 1;
// a degree-0 (scalar) network has no edges and therefore no ranks.
size_t HTNetwork<isOperator>::num_ranks() const {
return degree() == 0 ? 0 : numberOfComponents - 1;
}
//
// /*- - - - - - - - - - - - - - - - - - - - - - - - - - Miscellaneous - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
//
/*- - - - - - - - - - - - - - - - - - - - - - - - - - Miscellaneous - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
// template<bool isOperator>
// std::vector<size_t> TTNetwork<isOperator>::reduce_to_maximal_ranks(std::vector<size_t> _ranks, const std::vector<size_t>& _dimensions) {
// const size_t numComponents = _dimensions.size()/N;
// REQUIRE(_dimensions.size()%N == 0, "invalid number of dimensions for TTOperator");
// std::vector<size_t> HTNetwork<isOperator>::reduce_to_maximal_ranks(std::vector<size_t> _ranks, const std::vector<size_t>& _dimensions) {
// const size_t numComponents = static_cast<size_t>(std::pow(2,std::ceil(std::log2(static_cast<double>(_dimensions.size()/N ))))) - 1 + _dimensions.size()/N;
// REQUIRE(_dimensions.size()%N == 0, "invalid number of dimensions for HTOperator");
// REQUIRE(numComponents == _ranks.size()+1, "Invalid number of ranks ("<<_ranks.size()<<") or dimensions ("<<_dimensions.size()<<") given.");
//
// // Left to right sweep
......@@ -467,7 +469,7 @@ namespace xerus {
//
// return _ranks;
// }
//
//
// template<bool isOperator>
// size_t TTNetwork<isOperator>::degrees_of_freedom(const std::vector<size_t> &_dimensions, const std::vector<size_t> &_ranks) {
......@@ -549,6 +551,8 @@ namespace xerus {
size_t order = _T.degree();
//size_t numberOfDummyComponents = static_cast<size_t>(numberOfComponents) - 2 * degree()/N + 1; //TODO check this
TensorNode& currNode = nodes[_idx];
LOG(info, "dimension = " << _T.dimensions);
LOG(info, "iodx = " << _idx);
*currNode.tensorObject = std::move(_T);
for (size_t i = 0; i < order; ++i) {
currNode.neighbors[i].dimension = currNode.tensorObject->dimensions[i];
......@@ -802,12 +806,12 @@ namespace xerus {
// }
//
//
// template<bool isOperator>
// void TTNetwork<isOperator>::assume_core_position(const size_t _pos) {
// REQUIRE(_pos < degree()/N || (degree() == 0 && _pos == 0), "Invalid core position.");
// corePosition = _pos;
// canonicalized = true;
// }
// Marks the network as canonicalized with the core at _pos WITHOUT performing any
// orthogonalization — useful after manual set_component calls, which otherwise
// leave all orthogonality information destroyed.
// NOTE(review): the bound `_pos < degree()/N` only covers the number of leaves,
// but an HT network has numberOfComponents (> degree()/N) components — confirm
// whether internal nodes are valid core positions and adjust the check if so.
template<bool isOperator>
void HTNetwork<isOperator>::assume_core_position(const size_t _pos) {
REQUIRE(_pos < degree()/N || (degree() == 0 && _pos == 0), "Invalid core position.");
corePosition = _pos;
canonicalized = true;
}
//
//
// template<bool isOperator>
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment