Commit 9157b1a1 authored by Sebastian Wolf

Added Tensor(Network)::order() and deprecated Tensor(Network)::degree()

parent d1b50c37
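This commit renames the "degree" of tensors and tensor networks to "order" throughout the library, keeping degree() as a deprecated alias. Below is a minimal, self-contained sketch of that forwarding pattern; the class name OrderedObject is a stand-in for illustration only, and XERUS_deprecated is assumed to wrap the standard [[deprecated]] attribute, as the headers further down suggest.

#include <cstddef>
#include <vector>

// Stand-in class, not part of xerus; it only illustrates the renaming pattern of this commit.
class OrderedObject {
public:
    std::vector<std::size_t> dimensions;

    // New name: the order is the number of modes, i.e. dimensions.size().
    std::size_t order() const { return dimensions.size(); }

    // Old name kept for source compatibility; callers now get a compiler warning.
    [[deprecated("degree() is deprecated, use order() instead.")]]
    std::size_t degree() const { return order(); }
};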
......@@ -19,7 +19,7 @@ public:
size_t maxIterations;
InternalSolver(const TTOperator& _A, TTTensor& _x, const TTTensor& _b)
: d(_x.degree()), x(_x), A(_A), b(_b), solutionsNorm(frob_norm(_b)), maxIterations(1000)
: d(_x.order()), x(_x), A(_A), b(_b), solutionsNorm(frob_norm(_b)), maxIterations(1000)
{
leftAStack.emplace_back(Tensor::ones({1,1,1}));
rightAStack.emplace_back(Tensor::ones({1,1,1}));
......
......@@ -40,7 +40,7 @@ Tensor create_S() {
TTOperator create_operator(const size_t _degree) {
TTOperator create_operator(const size_t _order) {
const Index i, j, k, l;
// Create matrices
......@@ -51,7 +51,7 @@ TTOperator create_operator(const size_t _degree) {
const Tensor I = Tensor::identity({MAX_NUM_PER_SITE, MAX_NUM_PER_SITE});
// Create empty TTOperator
TTOperator A(2*_degree);
TTOperator A(2*_order);
Tensor comp;
......@@ -71,7 +71,7 @@ TTOperator create_operator(const size_t _degree) {
+ L(j, k) * Tensor::dirac({3, 3}, {2, 1})(i, l)
+ I(j, k) * Tensor::dirac({3, 3}, {2, 2})(i, l);
for(size_t c = 1; c+1 < _degree; ++c) {
for(size_t c = 1; c+1 < _order; ++c) {
A.set_component(c, comp);
}
......@@ -81,7 +81,7 @@ TTOperator create_operator(const size_t _degree) {
+ M(j, k)*Tensor::dirac({3, 1}, 1)(i, l)
+ S(j, k)*Tensor::dirac({3, 1}, 2)(i, l);
A.set_component(_degree-1, comp);
A.set_component(_order-1, comp);
return A;
}
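For reference, a small sketch of the elementary factories that create_operator combines above; the function name building_blocks_sketch is hypothetical, and the semantics are read off the calls in this diff (dirac places a single 1.0 at the given multi-index, identity and ones are what their names say).

#include <xerus.h>

void building_blocks_sketch() {
    // A 3x3 tensor with a single 1.0 at position (2,1), as in dirac({3, 3}, {2, 1}) above.
    xerus::Tensor D = xerus::Tensor::dirac({3, 3}, {2, 1});

    // Identity matrix and all-ones tensor, as used for I and the boundary stacks above.
    xerus::Tensor I = xerus::Tensor::identity({3, 3});
    xerus::Tensor O = xerus::Tensor::ones({1, 1, 1});

    // One summand of an operator component in the indexed notation used above:
    // an outer product of I and D, giving an order-4 tensor.
    xerus::Index i, j, k, l;
    xerus::Tensor comp;
    comp(i, j, k, l) = I(j, k) * D(i, l);
}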
......@@ -132,21 +132,21 @@ double get_mean_concentration(const TTTensor& _res, const size_t _i) {
});
const Tensor ones = Tensor::ones({MAX_NUM_PER_SITE});
for (size_t j = 0; j < _res.degree(); ++j) {
for (size_t j = 0; j < _res.order(); ++j) {
if (j == _i) {
result(l&0) = result(k, l&1) * weights(k);
} else {
result(l&0) = result(k, l&1) * ones(k);
}
}
// at this point the degree of 'result' is 0, so there is only one entry
// at this point the order of 'result' is 0, so there is only one entry
return result[{}];
}
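The comment above relies on the fact that contracting every mode leaves an order-zero Tensor with exactly one entry, reachable as result[{}]. A stand-alone sketch of that pattern; the function name is hypothetical and only calls already used in this example are assumed.

#include <xerus.h>

double contract_to_scalar_sketch() {
    xerus::Index k, l;
    const xerus::Tensor ones = xerus::Tensor::ones({4});
    xerus::Tensor result = xerus::Tensor::random({4, 4});

    const size_t d = result.order();  // capture first: each contraction removes one mode
    for (size_t j = 0; j < d; ++j) {
        result(l&0) = result(k, l&1) * ones(k);  // contract the leading mode with ones
    }
    // result now has order 0, so result[{}] is its single remaining entry.
    return result[{}];
}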
void print_mean_concentrations_to_file(const std::vector<TTTensor> &_result) {
std::fstream out("mean.dat", std::fstream::out);
for (const auto& res : _result) {
for (size_t k = 0; k < res.degree(); ++k) {
for (size_t k = 0; k < res.order(); ++k) {
out << get_mean_concentration(res, k) << ' ';
}
out << std::endl;
......@@ -166,7 +166,7 @@ int main() {
start.use_dense_representations();
start += 1e-14 * TTTensor::random(
start.dimensions,
std::vector<size_t>(start.degree()-1, rankX-1)
std::vector<size_t>(start.order()-1, rankX-1)
);
const auto A = create_operator(numProteins);
......
......@@ -88,7 +88,7 @@ namespace xerus { namespace internal {
/*- - - - - - - - - - - - - - - - - - - - - - - - - - Miscellaneous - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
public:
size_t degree() const;
size_t order() const;
Tensor& component(const size_t _idx);
......
......@@ -28,10 +28,10 @@
namespace xerus { namespace examples {
/**
* @brief Constructs a rank _n+1 TTTensor of degree @a _degree and external dimensions @a _n that has entries >0, maximal where all indices coincide.
* @brief Constructs a rank _n+1 TTTensor of order @a _order and external dimensions @a _n that has entries >0, maximal where all indices coincide.
* @details Constructed as a sum of nearest-neighbor terms that each have entries as 1/(std::abs(i-j)+alpha)
*/
TTTensor peaking_diagonals(size_t _degree, size_t _n, value_t _alpha = 1.0);
TTTensor peaking_diagonals(size_t _order, size_t _n, value_t _alpha = 1.0);
}}
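A hedged usage sketch for the factory documented above; the function name is hypothetical, and it is assumed that the declaration shown here is reachable through the library's umbrella header (the file path is not visible in this diff).

#include <xerus.h>

xerus::TTTensor peaking_diagonals_sketch() {
    // Order 4, every external mode of dimension 10, default _alpha = 1.0.
    // By the @brief above, the result has rank 10+1, strictly positive entries,
    // and is largest where all indices coincide, e.g. at index (3, 3, 3, 3).
    return xerus::examples::peaking_diagonals(4, 10);
}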
......@@ -80,16 +80,16 @@ namespace xerus {
/**
* @brief Constructs a zero-initialized HTNetwork with the given degree and ranks all equal to one.
* @details Naturally for HTOperators the degree must be even.
* @param _degree number of physical indices
* @brief Constructs a zero-initialized HTNetwork with the given order and ranks all equal to one.
* @details Naturally for HTOperators the order must be even.
* @param _order number of physical indices
*/
explicit HTNetwork(const size_t _degree);
explicit HTNetwork(const size_t _order);
/**
* @brief Constructs a zero-initialized HTNetwork with the given dimensions and ranks all equal to one.
* @details Naturally for HTOperators the degree must be even.
* @details Naturally for HTOperators the order must be even.
* @params _dimensions Tuple of the dimensions of the physical indices
*/
explicit HTNetwork(Tensor::DimensionTuple _dimensions);
......@@ -234,7 +234,7 @@ namespace xerus {
/*- - - - - - - - - - - - - - - - - - - - - - - - - - Internal helper functions - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
protected:
/**
* @brief Return the number of ranks, i.e. 0 for degree zero and number of components -1 otherwise.
* @brief Return the number of ranks, i.e. 0 for order zero and number of components -1 otherwise.
* @return number of ranks
*/
size_t num_ranks() const;
......@@ -439,7 +439,7 @@ namespace xerus {
const std::vector<size_t> shuffle({0,2,1});
size_t numComp = get_number_of_components();
//only leaves
for (size_t n = numComp - 1; n >= numComp - degree()/N; --n) {
for (size_t n = numComp - 1; n >= numComp - order()/N; --n) {
xerus::reshuffle(component(n), component(n), shuffle);
}
}
......
......@@ -105,10 +105,10 @@ namespace xerus {
Index& operator=(const Index&) = default;
/// @brief Sets the span this index actually represents in a tensor of given order.
void set_span(const size_t _degree);
void set_span(const size_t _order);
/// @brief Returns the span this index actually represents in a tensor of given order.
size_t actual_span(const size_t _degree) const;
size_t actual_span(const size_t _order) const;
/// @brief Checks whether the Index represents a fixed number.
bool fixed() const;
......@@ -138,14 +138,14 @@ namespace xerus {
/** @brief: Allow the creation of Indices covering all but x dimensions using the and operator.
* E.g. A() = B(i&0) * C(i&0), defines A as the full contraction between B and C,
* indifferent of the actual degree of B and C.
* indifferent of the actual order of B and C.
* @param _span Number of dimensions NOT to be covered by this index.
*/
Index operator&(const size_t _span) const;
/** @brief: Allow the creation of Indices covering an x-th fraction of the indices.
* E.g. A(i&0) = B(i/2, j/2) * C(j&0), defines A as the contraction between the symmetric matrification
* of B and the vectorisation of C, indifferent of the actual degree of B and C.
* of B and the vectorisation of C, indifferent of the actual order of B and C.
* @param _span the fraction of the dimensions to be covered by this index.
*/
Index operator/(const size_t _span) const;
......
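The two span operators documented above can be exercised exactly as in the doc comments; a sketch with hypothetical example tensors B, C, M, v (dimensions chosen freely) and only factory calls that already appear elsewhere in this diff.

#include <xerus.h>

void index_span_sketch() {
    xerus::Index i, j;

    // i&0: i spans all modes of B and C, so this is the full contraction; A has order 0.
    xerus::Tensor B = xerus::Tensor::random({3, 4, 5});
    xerus::Tensor C = xerus::Tensor::random({3, 4, 5});
    xerus::Tensor A;
    A() = B(i&0) * C(i&0);

    // i/2, j/2: each index spans half of M's modes, i.e. the matrification of M
    // acts on the vectorisation of v; w receives the remaining half spanned by i.
    xerus::Tensor M = xerus::Tensor::random({2, 3, 2, 3});
    xerus::Tensor v = xerus::Tensor::random({2, 3});
    xerus::Tensor w;
    w(i&0) = M(i/2, j/2) * v(j&0);
}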
......@@ -86,20 +86,20 @@ namespace xerus {
/*- - - - - - - - - - - - - - - - - - - - - - - - - - Others - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
///@brief Allows cast to value_t if the degree of the current object is equal to 0.
///@brief Allows cast to value_t if the order of the current object is equal to 0.
explicit operator value_t() const;
///@brief Checks whether _otherTensor is the tensorObject of this IndexedTensor.
bool uses_tensor(const tensor_type* _otherTensor) const;
///@brief Returns the degree of the associated tensorObject
size_t degree() const;
///@brief Returns the order of the associated tensorObject
size_t order() const;
///@brief Assigns the indices using the degree of the tensorObject.
///@brief Assigns the indices using the order of the tensorObject.
void assign_indices();
///@brief Assigns the indices assuming the given degree.
void assign_indices(const size_t _degree);
///@brief Assigns the indices assuming the given order.
void assign_indices(const size_t _order);
///@brief Assigns the indices using the current tensorObject.
void assign_index_dimensions();
......@@ -156,7 +156,7 @@ namespace xerus {
///@brief Returns the one-norm of the associated tensor object.
value_t one_norm(const IndexedTensorReadOnly<Tensor>& _idxTensor);
size_t get_eval_degree(const std::vector<Index>& _indices);
size_t get_eval_order(const std::vector<Index>& _indices);
}
......
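The value_t cast documented in this hunk gives a compact way to read off a fully contracted expression as a scalar. A sketch; the function name is hypothetical, and it is assumed that the explicit operator shown above is invocable via static_cast on the order-zero expression.

#include <xerus.h>

double scalar_cast_sketch() {
    xerus::Index i;
    xerus::Tensor v = xerus::Tensor::random({7});

    // v(i) * v(i) contracts all modes, so the indexed expression has order zero
    // and can be cast explicitly to value_t (a plain double in xerus).
    return static_cast<xerus::value_t>(v(i) * v(i));
}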
......@@ -387,12 +387,20 @@ namespace xerus {
/*- - - - - - - - - - - - - - - - - - - - - - - - - - Information - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/**
* @brief Returns the degree of the tensor.
* @brief [Deprecated] Returns the degree of the tensor.
* @details The degree is always equal to dimensions.size()
* @return the degree of the tensor
*/
XERUS_deprecated("Tensor::degree() is deprecated and will be removed in a future version. Use Tensor::order() instead.")
size_t degree() const;
/**
* @brief Returns the order of the tensor.
* @details The order is always equal to dimensions.size()
* @return the order of the tensor
*/
size_t order() const;
/**
* @brief Checks whether the tensor has a non-trivial global scaling factor.
* @return true if there is a non-trivial factor, false if not.
......
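A quick check of the two members documented above; nothing beyond Tensor::random, the public dimensions field and order()/degree() themselves is assumed. degree() still compiles, but after this commit it emits the XERUS_deprecated warning.

#include <xerus.h>

bool order_equals_dimensions_sketch() {
    const xerus::Tensor A = xerus::Tensor::random({2, 3, 4});
    // order() is simply the number of modes, i.e. dimensions.size();
    // A.degree() would return the same value but now triggers a deprecation warning.
    return A.order() == 3 && A.order() == A.dimensions.size();
}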
......@@ -113,7 +113,7 @@ namespace xerus {
size_t size() const noexcept;
size_t degree() const noexcept;
size_t order() const noexcept;
void erase() noexcept;
};
......@@ -121,7 +121,7 @@ namespace xerus {
protected:
/**
* @brief Internal indicator to prevent the creation of a degree zero node in TensorNetwork constructor.
* @brief Internal indicator to prevent the creation of an order zero node in TensorNetwork constructor.
*/
enum class ZeroNode : bool { None, Add };
......@@ -169,16 +169,16 @@ namespace xerus {
/**
* @brief Constructs the trivial TensorNetwork containing a Tensor with the given degree.
* @brief Constructs the trivial TensorNetwork containing a Tensor with the given order.
* @details All dimensions are set equal to one and the only entry
* of the tensor is zero.
*/
TensorNetwork(size_t _degree);
TensorNetwork(size_t _order);
/**
* @brief (Internal) Constructs an order zero TensorNetwork.
* @details The order of an empty TN is zero.
* @param _nodeStatus defines whether the network will contain one degree zero node with the single
* @param _nodeStatus defines whether the network will contain one order zero node with the single
* entry zero.
*/
explicit TensorNetwork(const ZeroNode _nodeStatus);
......@@ -367,12 +367,21 @@ namespace xerus {
/*- - - - - - - - - - - - - - - - - - - - - - - - - - Miscellaneous - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/**
* @brief Gets the degree of the TensorNetwork.
* @brief [Deprecated] Gets the degree of the TensorNetwork.
* @details The degree is defined as the number of dimensions (i.e. dimensions.size())
* and is always equal to the number of externalLinks (i.e. externalLinks.size()).
* @return the degree.
*/
XERUS_deprecated("TensorNetwork::degree() is deprecated and will be removed in a future version. Use TensorNetwork::order() instead.")
size_t degree() const;
/**
* @brief Gets the order of the TensorNetwork.
* @details The order is defined as the number of dimensions (i.e. dimensions.size())
* and is always equal to the number of externalLinks (i.e. externalLinks.size()).
* @return the order.
*/
size_t order() const;
/**
* @brief Calculates the storage requirement of the current representation.
......@@ -419,7 +428,7 @@ namespace xerus {
/**
* @brief Finds traces defined by the indices and internally links the corresponding indices. Also applies all fixed indices.
* @details For each trace this reduces the degree of the TN by two and removes two indices from the IndexedTensor.
* @details For each trace this reduces the order of the TN by two and removes two indices from the IndexedTensor.
*/
static void link_traces_and_fix(internal::IndexedTensorWritable<TensorNetwork>&& _base);
......
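The note above states that every trace found by link_traces_and_fix lowers the order of the network by two. The same bookkeeping can be seen directly in indexed notation; the function and tensor names below are hypothetical, and it is assumed that repeating an index within one expression produces exactly the trace described above.

#include <xerus.h>

void trace_order_sketch() {
    xerus::Index i, j;
    xerus::Tensor A = xerus::Tensor::random({5, 5, 7});

    // Repeating i traces the first two modes: the order drops from 3 to 1,
    // i.e. by two per trace, matching the description above. The resulting
    // order always equals dimensions.size() and externalLinks.size().
    xerus::Tensor t;
    t(j) = A(i, i, j);
}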
......@@ -75,15 +75,15 @@ namespace xerus {
/**
* @brief Constructs a zero-initialized TTNetwork with the given degree and ranks all equal to one.
* @details Naturally for TTOperators the degree must be even.
* @brief Constructs a zero-initialized TTNetwork with the given order and ranks all equal to one.
* @details Naturally for TTOperators the order must be even.
*/
explicit TTNetwork(const size_t _degree);
explicit TTNetwork(const size_t _order);
/**
* @brief Constructs a zero-initialized TTNetwork with the given dimensions and ranks all equal to one.
* @details Naturally for TTOperators the degree must be even.
* @details Naturally for TTOperators the order must be even.
*/
explicit TTNetwork(Tensor::DimensionTuple _dimensions);
......@@ -264,11 +264,11 @@ namespace xerus {
bool exceeds_maximal_ranks() const;
///@brief Return the number of components, i.e. degree()/N.
///@brief Return the number of components, i.e. order()/N.
size_t num_components() const;
///@brief Return the number of ranks, i.e. 0 for degree zero and degree()/N-1 otherwise.
///@brief Return the number of ranks, i.e. 0 for order zero and order()/N-1 otherwise.
size_t num_ranks() const;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - Miscellaneous - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
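The two helpers above count in units of order()/N. A sketch of that bookkeeping, assuming the usual xerus convention that N is 1 for TTTensor and 2 for TTOperator, and the row-modes-then-column-modes ordering of operator dimensions that appears later in this diff; the factory calls mirror those in the tests below.

#include <xerus.h>

void component_count_sketch() {
    // order 4, N == 1: four components and 4/1 - 1 = 3 ranks.
    const xerus::TTTensor x = xerus::TTTensor::random({2, 3, 4, 5}, {2, 2, 2});
    // x.order() == 4 and x.ranks().size() == 3.

    // order 4, N == 2: two components and 4/2 - 1 = 1 rank.
    const xerus::TTOperator A = xerus::TTOperator::random({2, 3, 2, 3}, {2});
    // A.order() == 4 and A.ranks().size() == 1.
}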
......@@ -431,7 +431,7 @@ namespace xerus {
/**
* @brief Move the core to the right.
* @details Basically calls move_core() with _position = degree()-1
* @details Basically calls move_core() with _position = order()-1
*/
void canonicalize_right();
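As documented above, canonicalize_right() is shorthand for moving the core to the last component. A short sketch; the function name is hypothetical, and move_core and the random factory are used exactly as in the tests further down in this diff.

#include <xerus.h>

void core_position_sketch() {
    xerus::TTTensor X = xerus::TTTensor::random({4, 4, 4, 4}, {2, 2, 2});

    X.canonicalize_right();  // equivalent to X.move_core(X.order() - 1)
    X.move_core(0);          // move the core back to the first component
}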
......@@ -443,7 +443,7 @@ namespace xerus {
template<bool B = isOperator, typename std::enable_if<B, int>::type = 0>
void transpose() {
const std::vector<size_t> shuffle({0,2,1,3});
for (size_t n = 0; n < degree()/N; ++n) {
for (size_t n = 0; n < order()/N; ++n) {
xerus::reshuffle(component(n), component(n), shuffle);
}
}
......
......@@ -119,7 +119,7 @@ static misc::UnitTest tensor_svd_soft("Tensor", "SVD_soft_thresholding", [](){
(U(i,j,k,o), S(o,p), V(p,l,m,n)) = SVD(A(i,j,k,l,m,n));
(Us(i,j,k,o), Ss(o,p), Vs(p,l,m,n)) = SVD(A(i,j,k,l,m,n), 7.3);
U.resize_mode(U.degree()-1, Ss.dimensions[0]);
U.resize_mode(U.order()-1, Ss.dimensions[0]);
V.resize_mode(0, Ss.dimensions[0]);
TEST(approx_equal(U, Us, 1e-12));
......
......@@ -344,7 +344,7 @@ static misc::UnitTest tensor_solve_matrix("Tensor", "solve_matrix", [](){
}
});
static misc::UnitTest tensor_solve_w_extra_degree("Tensor", "solve_with_extra_degrees", [](){
static misc::UnitTest tensor_solve_w_extra_order("Tensor", "solve_with_extra_orders", [](){
Index ii,jj,kk,ll,mm,nn;
Tensor A = xerus::Tensor::random({2,2});
Tensor B = xerus::Tensor::random({2,2});
......
......@@ -66,7 +66,7 @@ UNIT_TEST(Strassen, CP,
while (res > 1e-4) {
TTTensor ttDiff(_A.degree());
TTTensor ttDiff(_A.order());
// bool set = false;
while (decomp.size() >= _r) {
// ttDiff = TTTensor(*decomp.front()); //()
......@@ -97,7 +97,7 @@ UNIT_TEST(Strassen, CP,
}
// }
if (_A.degree() == 3) {
if (_A.order() == 3) {
Tensor &tn0 = *std::static_pointer_cast<Tensor>(ttDiff.nodes[0].tensorObject);
Tensor &tn1 = *std::static_pointer_cast<Tensor>(ttDiff.nodes[1].tensorObject);
Tensor &tn2 = *std::static_pointer_cast<Tensor>(ttDiff.nodes[2].tensorObject);
......
......@@ -48,10 +48,10 @@ extern "C"
using namespace xerus;
static Tensor::DimensionTuple random_dimensions(const size_t _degree, const size_t _maxDim, std::mt19937_64 _rnd) {
static Tensor::DimensionTuple random_dimensions(const size_t _order, const size_t _maxDim, std::mt19937_64 _rnd) {
std::uniform_int_distribution<size_t> dist(1, _maxDim);
Tensor::DimensionTuple dims;
for(size_t i = 0; i < _degree; ++i) { dims.emplace_back(dist(_rnd)); }
for(size_t i = 0; i < _order; ++i) { dims.emplace_back(dist(_rnd)); }
return dims;
}
......@@ -158,15 +158,15 @@ static misc::UnitTest tensor_constructors("Tensor", "Constructors", [](){
for(size_t i = 0; i < tensors.size(); ++i) {
// Test defaults being degree zero
// Test defaults being order zero
if(i < 4) {
MTEST(tensors[i].degree() == 0, i);
MTEST(tensors[i].order() == 0, i);
} else {
MTEST(tensors[i].degree() == 10, i);
MTEST(tensors[i].order() == 10, i);
}
// Test degree calculation
MTEST(tensors[i].degree() == tensors[i].dimensions.size(), i);
// Test order calculation
MTEST(tensors[i].order() == tensors[i].dimensions.size(), i);
// Test size calculation
MTEST(tensors[i].size == misc::product(tensors[i].dimensions), i);
......
......@@ -24,7 +24,7 @@
#include "../../include/xerus/misc/internal.h"
using namespace xerus;
static misc::UnitTest tn_contr40("TensorNetwork", "contractions_of_4_to_degree_0", [](){
static misc::UnitTest tn_contr40("TensorNetwork", "contractions_of_4_to_order_0", [](){
Tensor A = Tensor::random({100,1});
Tensor B = Tensor::random({100,1});
Tensor C = Tensor::random({100,1});
......@@ -45,7 +45,7 @@ static misc::UnitTest tn_contr40("TensorNetwork", "contractions_of_4_to_degree_0
TEST(misc::approx_equal(E[{}], a1 * a2, 1e-20));
});
static misc::UnitTest tn_contr30("TensorNetwork", "contractions_of_3_to_degree_0", [](){
static misc::UnitTest tn_contr30("TensorNetwork", "contractions_of_3_to_order_0", [](){
Tensor A = Tensor::random({1,10});
Tensor B = Tensor::random({10,100});
Tensor C = Tensor::random({100,1});
......
......@@ -130,7 +130,7 @@ static misc::UnitTest tt_namedconstr("TT", "named_constructors", [](){
TTTensor X = TTTensor::random(dimensions, ranks);
std::vector<size_t> found_ranks = X.ranks();
X.move_core(X.degree()-1);
X.move_core(X.order()-1);
X.move_core(0);
MTEST(X.ranks() == found_ranks, X.ranks() << " vs " << found_ranks);
// {
......@@ -145,7 +145,7 @@ static misc::UnitTest tt_namedconstr("TT", "named_constructors", [](){
TTOperator Xop = TTOperator::random(operatorDimensions, ranks);
found_ranks = Xop.ranks();
Xop.move_core(X.degree()-1);
Xop.move_core(X.order()-1);
Xop.move_core(0);
MTEST(Xop.ranks() == found_ranks, Xop.ranks() << " vs " << found_ranks);
// {
......
......@@ -61,7 +61,7 @@ namespace xerus {
// direction: decreasing index
for (size_t p = _data.ALS.sites-1; p>0; --p) {
Tensor S, Vt;
// calculate_svd(x, S, Vt, x, x.degree()-1, _data.targetRank[_data.currIndex+p-1], EPSILON); TODO
// calculate_svd(x, S, Vt, x, x.order()-1, _data.targetRank[_data.currIndex+p-1], EPSILON); TODO
(x(i&1,j), S(j,k), Vt(k,l&1)) = SVD(x(i&2,l^2), _data.targetRank[_data.currIndex+p-1]);
_x[p] = std::move(Vt);
x(i&1,k) = x(i&1,j) * S(j,k);
......@@ -103,7 +103,7 @@ namespace xerus {
* modifies x
*/
void ALSVariant::ALSAlgorithmicData::prepare_x_for_als() {
const size_t d = x.degree();
const size_t d = x.order();
Index r1,r2,n1,cr1;
size_t firstOptimizedIndex = 0;
......@@ -215,7 +215,7 @@ namespace xerus {
}
void ALSVariant::ALSAlgorithmicData::prepare_stacks() {
const size_t d = x.degree();
const size_t d = x.order();
Index r1,r2;
Tensor tmpA;
......@@ -485,7 +485,7 @@ namespace xerus {
#ifndef XERUS_DISABLE_RUNTIME_CHECKS
_x.require_correct_format();
_b.require_correct_format();
REQUIRE(_x.degree() > 0, "");
REQUIRE(_x.order() > 0, "");
REQUIRE(_x.dimensions == _b.dimensions, "");
if (_Ap != nullptr) {
......@@ -493,7 +493,7 @@ namespace xerus {
REQUIRE(_Ap->dimensions.size() == _b.dimensions.size()*2, "");
for (size_t i=0; i<_x.dimensions.size(); ++i) {
REQUIRE(_Ap->dimensions[i] == _x.dimensions[i], "");
REQUIRE(_Ap->dimensions[i+_Ap->degree()/2] == _x.dimensions[i], "");
REQUIRE(_Ap->dimensions[i+_Ap->order()/2] == _x.dimensions[i], "");
}
}
#endif
......
......@@ -51,7 +51,7 @@ namespace xerus { namespace impl_TrASD {
internal::BlockTT x;
///@brief Degree of the solution.
const size_t degree;
const size_t order;
///@brief Reference to the measurement set (external ownership)
const RankOneMeasurementSet& measurments;
......@@ -133,7 +133,7 @@ namespace xerus { namespace impl_TrASD {
OptimizationSolver(_optiAlgorithm, _perfData),
outX(_x),
x(_x, 0, P),
degree(_x.degree()),
order(_x.order()),
measurments(_measurments),
numMeasurments(_measurments.size()),
......@@ -154,13 +154,13 @@ namespace xerus { namespace impl_TrASD {
bestTestResidual(std::numeric_limits<double>::max()),
prevRanks(tracking, outX.ranks()),
leftStack(degree, std::vector<Tensor>(numMeasurments)),
rightStack(degree, std::vector<Tensor>(numMeasurments))
leftStack(order, std::vector<Tensor>(numMeasurments)),
rightStack(order, std::vector<Tensor>(numMeasurments))
{
_x.require_correct_format();
XERUS_REQUIRE(numMeasurments > 0, "Need at least one measurement.");
XERUS_REQUIRE(measurments.order() == degree, "Measurement degree must coincide with x degree.");
XERUS_REQUIRE(measurments.order() == order, "Measurement order must coincide with x order.");
// Create test set
std::uniform_real_distribution<double> stochDist(0.0, 1.0);
......@@ -187,10 +187,10 @@ namespace xerus { namespace impl_TrASD {
/// and the given component contracted with the component of the measurement operator. For _corePosition == corePosition and _currentComponent == x.components(corePosition)
/// this really updates the stack, otherwise it uses the stack as scratch space.
void update_left_stack(const size_t _position) {
REQUIRE(_position+1 < degree, "Invalid position");
REQUIRE(_position+1 < order, "Invalid position");
Tensor measCmp;
if(_position > 0 && _position+1 < degree) {
if(_position > 0 && _position+1 < order) {
const Tensor shuffledX = reshuffle(x.get_component(_position), {1, 0, 2});
for(size_t i = 0; i < numMeasurments; ++i ) {
contract(measCmp, measurments.positions[i][_position], shuffledX, 1);
......@@ -209,11 +209,11 @@ namespace xerus { namespace impl_TrASD {
/// and the given component contracted with the component of the measurement operator. For _corePosition == corePosition and _currentComponent == x.components(corePosition)
/// this really updates the stack, otherwise it uses the stack as scratch space.
void update_right_stack(const size_t _position) {
REQUIRE(_position > 0 && _position < degree, "Invalid position");
REQUIRE(_position > 0 && _position < order, "Invalid position");
Tensor measCmp;
if(_position > 0 && _position+1 < degree) {
if(_position > 0 && _position+1 < order) {
const Tensor shuffledX = reshuffle(x.get_component(_position), {1, 0, 2});
for(size_t i = 0; i < numMeasurments; ++i ) {
contract(measCmp, measurments.positions[i][_position], shuffledX, 1);
......@@ -232,7 +232,7 @@ namespace xerus { namespace impl_TrASD {
///@brief: Calculates the component at _corePosition of the projected gradient from the residual, i.e. E(A^T(b-Ax)).
Tensor calculate_delta(const size_t _corePosition, const size_t _setId) {
const size_t localLeftRank = _corePosition == 0 ? 1 : x.rank(_corePosition-1);
const size_t localRightRank = _corePosition+1 == degree ? 1 : x.rank(_corePosition);
const size_t localRightRank = _corePosition+1 == order ? 1 : x.rank(_corePosition);
const size_t dyadDim = localLeftRank*localRightRank;
Tensor delta({x.dimensions[_corePosition], localLeftRank, localRightRank}, Tensor::Representation::Dense);
......@@ -240,7 +240,7 @@ namespace xerus { namespace impl_TrASD {
const Tensor core = x.get_core(_setId);
Tensor leftCore, leftCorePos, leftCorePosRight;
if( _corePosition > 0 && _corePosition+1 < degree) {
if( _corePosition > 0 && _corePosition+1 < order) {
for(size_t idx = 0; idx < sets[_setId].size(); ++idx) {
const size_t i = sets[_setId][idx];
contract(leftCore, leftStack[_corePosition-1][i], core, 1);
......@@ -316,7 +316,7 @@ namespace xerus { namespace impl_TrASD {
Tensor leftCore, leftCorePos, leftCorePosRight;
value_t normSqrAProjGrad = 0.0;
if( _corePosition > 0 && _corePosition+1 < degree) {
if( _corePosition > 0 && _corePosition+1 < order) {
for(size_t idx = 0; idx < sets[_setId].size(); ++idx) {
const size_t i = sets[_setId][idx];
contract(leftCore, leftStack[_corePosition-1][i], _delta, 1);
......@@ -365,7 +365,7 @@ namespace xerus { namespace impl_TrASD {
void finish() {
for(size_t i = 0; i < bestX.degree(); i++) {
for(size_t i = 0; i < bestX.order(); i++) {
if(i == bestX.corePosition) {
outX.set_component(i, bestX.get_average_core());
} else {
......@@ -425,7 +425,7 @@ namespace xerus { namespace impl_TrASD {
// Build initial right stack
REQUIRE(x.corePosition == 0, "Expecting core position to be 0.");
for(size_t corePosition = degree-1; corePosition > 0; --corePosition) {
for(size_t corePosition = order-1; corePosition > 0; --corePosition) {
update_right_stack(corePosition);
}
......@@ -449,7 +449,7 @@ namespace xerus { namespace impl_TrASD {
// LOG(ASD, "Residual " << std::scientific << optResidual << " " << /*setResiduals*/ -1 << ". Controlset: " << testResidual << ". Ranks: " << x.ranks() << ". DOFs: " << x.dofs() << ". Norm: " << frob_norm(x.get_average_core()));
// bool maxRankReached = true;
// for(size_t k = 0; k+1 < x.degree(); ++k ) {
// for(size_t k = 0; k+1 < x.order(); ++k ) {
// maxRankReached = maxRankReached && (x.rank(k) == maxRanks[k]);
// }
......@@ -466,7 +466,7 @@ namespace xerus { namespace impl_TrASD {
if(P>1) { shuffle_sets(); }
// Forward sweep
for(size_t corePosition = 0; corePosition+1 < degree; ++corePosition) {
for(size_t corePosition = 0; corePosition+1 < order; ++corePosition) {
update_core(corePosition);
......@@ -474,10 +474,10 @@ namespace xerus { namespace impl_TrASD {
update_left_stack(corePosition);
}
update_core(degree-1);
update_core(order-1);
// Backward sweep
for(size_t corePosition = degree-1; corePosition > 0; --corePosition) {
for(size_t corePosition = order-1; corePosition > 0; --corePosition) {
update_core(corePosition);
x.move_core_left(rankEps, /*std::min(*/maxRanks[corePosition-1]/*, prevRanks[0][corePosition-1]+1)*/);
......
......@@ -41,7 +41,7 @@ namespace xerus {
for(size_t iteration = 0; iteration < _numHalfSweeps; iteration += 2) {
// Move right
for(size_t pos = 0; pos < _x.degree(); ++pos) { XERUS_REQUIRE_TEST;
for(size_t pos = 0; pos < _x.order(); ++pos) { XERUS_REQUIRE_TEST;
_x.move_core(pos);
std::pair<TensorNetwork, TensorNetwork> split = _x.chop(pos);
_x.component(pos)(rU, iX, rL) = split.first(iU&1, rU)*split.second(rL, iL&1)*_b(iU^pos, iX, iL&(pos+1));
......@@ -49,7 +49,7 @@ namespace xerus {
// Move left
if(iteration+1 < _numHalfSweeps) {
for(size_t pos = _x.degree()-2; pos > 0; --pos) { XERUS_REQUIRE_TEST;
for(size_t pos = _x.order()-2; pos > 0; --pos) { XERUS_REQUIRE_TEST;
_x.move_core(pos);
std::pair<TensorNetwork, TensorNetwork> split = _x.chop(pos);
_x.component(pos)(rU, iX, rL) = split.first(iU&1, rU)*split.second(rL, iL&1)*_b(iU^pos, iX, iL&(pos+1));
......
......@@ -28,7 +28,7 @@
namespace xerus {
double IHT(TTTensor& _x, const SinglePointMeasurementSet& _measurments, PerformanceData& _perfData) {
const size_t numMeasurments = _measurments.size();
const size_t degree = _x.degree();
const size_t order = _x.order();
const size_t USER_MEASUREMENTS_PER_ITR = numMeasurments;
const value_t ALPHA_CHG = 1.1;
......@@ -72,9 +72,9 @@ namespace xerus {
for (value_t beta = 1/ALPHA_CHG; beta < ALPHA_CHG*1.5; beta *= ALPHA_CHG) {
// Build the largeX
for(size_t d = 0; d < degree; ++d) {
for(size_t d = 0; d < order; ++d) {
Tensor& currComp = _x.component(d);
Tensor newComp({d == 0 ? 1 : (currComp.dimensions[0]+USER_MEASUREMENTS_PER_ITR), currComp.dimensions[1], d == degree-1 ? 1 : (currComp.dimensions[2]+USER_MEASUREMENTS_PER_ITR)});
Tensor newComp({d == 0 ? 1 : (currComp.dimensions[0]+USER_MEASUREMENTS_PER_ITR), currComp.dimensions[1], d == order-1 ? 1 : (currComp.dimensions[2]+USER_MEASUREMENTS_PER_ITR)});
// Copy dense part
for(size_t r1 = 0; r1 < currComp.dimensions[0]; ++r1) {
......@@ -90,12 +90,12 @@ namespace xerus {
for(size_t i = 0; i < USER_MEASUREMENTS_PER_ITR; ++i) {
newComp[{0, _measurments.positions[measurementOrder[i]][d], i+currComp.dimensions[2]}] = beta*alpha*(_measurments.measuredValues[measurementOrder[i]] - currentValues[measurementOrder[i]]);
}
} else if (d!=degree-1) {
} else if (d!=order-1) {
for(size_t i = 0; i < USER_MEASUREMENTS_PER_ITR; ++i) {
newComp[{i + currComp.dimensions[0], _measurments.positions[measurementOrder[i]][d], i+currComp.dimensions[2]}] = 1.0;
}
} else {
// d == degree-1
// d == order-1
for(size_t i = 0; i < USER_MEASUREMENTS_PER_ITR; ++i) {
newComp[{i + currComp.dimensions[0], _measurments.positions[measurementOrder[i]][d], 0}] = 1.0;
}
......@@ -111,16 +111,16 @@ namespace xerus {
// build stack from right to left
std::vector<Tensor> stack;
stack.emplace_back(Tensor::ones({1,1}));
for (size_t i=degree-1; i>0; --i) {
for (size_t i=order-1; i>0; --i) {
Tensor next;
next(i1,i2) = newX.get_component(i)(i1,i5,i3) * largeX.get_component(i)(i2,i5,i4) * stack.back()(i3,i4);
stack.emplace_back(next);