Commit c87eef84 authored by Sebastian Wolf

Corrected Spelling mistake

parent 9534f71f
@@ -64,7 +64,7 @@ make install
 ## Compiling your own Applications Using Xerus
-If `xerus` is propperly installed on your system, compiling your own applications using `xerus` is as simple as using any other library. Just include `-lxerus` in your linker call and make sure to use
+If `xerus` is properly installed on your system, compiling your own applications using `xerus` is as simple as using any other library. Just include `-lxerus` in your linker call and make sure to use
 `-std=c++11` or `-std=c++14` in all compilation units that include `xerus.h` and everything should work.
 If you want to use the static version of `xerus` you also have to include all libraries `xerus` depends on in your linker call. In particular these are lapacke (`-llapacke`),
......
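
For illustration, a minimal sketch of an application built this way, assuming the `xerus` headers and shared library are installed in the default search paths (the file name, dimensions, and ranks are arbitrary choices for this sketch, not taken from the repository):

```cpp
// main.cpp -- build with e.g.: g++ -std=c++14 main.cpp -lxerus -o example
#include <iostream>
#include <xerus.h>

int main() {
	// A random order-4 tensor in the TT format with all TT ranks equal to 2.
	const xerus::TTTensor x = xerus::TTTensor::random({8, 8, 8, 8}, {2, 2, 2});
	std::cout << "TT rank between the first two components: " << x.rank(0) << std::endl;
	return 0;
}
```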
@@ -54,7 +54,7 @@ namespace xerus {
 ContractedTNCache rhsCache; ///< stacks for the right-hand-side (either xb or xAtb)
 value_t normB; ///< norm of the (global) right hand side
 std::pair<size_t, size_t> optimizedRange; ///< range of indices for the nodes of _x that need to be optimized
-bool cannonicalizeAtTheEnd; ///< whether _x should be cannonicalized at the end
+bool canonicalizeAtTheEnd; ///< whether _x should be canonicalized at the end
 size_t corePosAtTheEnd; ///< core position that should be restored at the end of the algorithm
 std::function<value_t()> energy_f; ///< the energy functional used for this calculation
 std::function<value_t()> residual_f; ///< the functional to calculate the current residual
@@ -69,7 +69,7 @@ namespace xerus {
 /**
 * @brief Finds the range of nodes that need to be optimized and orthogonalizes @a _x properly
 * @details finds full-rank nodes (these can wlog be set to identity and need not be optimized)
-* requires cannonicalizeAtTheEnd and corePosAtTheEnd to be set
+* requires canonicalizeAtTheEnd and corePosAtTheEnd to be set
 * sets optimizedRange
 * modifies x
 */
......
@@ -47,12 +47,12 @@ namespace xerus {
 ///@brief The number of external links in each node, i.e. one for TTTensors and two for TTOperators.
 static constexpr const size_t N = isOperator?2:1;
-/// @brief Flag indicating whether the TTNetwork is cannonicalized.
-bool cannonicalized;
+/// @brief Flag indicating whether the TTNetwork is canonicalized.
+bool canonicalized;
 /**
 * @brief The position of the core.
-* @details If cannonicalized is TRUE, corePosition gives the position of the core tensor. All components
+* @details If canonicalized is TRUE, corePosition gives the position of the core tensor. All components
 * with smaller index are then left-orthogonalized and all components with larger index right-orthogonalized.
 */
 size_t corePosition;
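
To make the documented invariant concrete, a hedged sketch (not part of this commit; the dimensions and the chosen core position are arbitrary):

```cpp
#include <cassert>
#include <xerus.h>

int main() {
	xerus::TTTensor u = xerus::TTTensor::random({3, 3, 3}, {2, 2});
	u.move_core(1); // component 0 becomes left-orthogonal, component 2 right-orthogonal
	assert(u.canonicalized && u.corePosition == 1);
	return 0;
}
```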
@@ -196,7 +196,7 @@ namespace xerus {
 result.require_correct_format();
 XERUS_INTERNAL_CHECK(!result.exceeds_maximal_ranks(), "Internal Error");
-result.cannonicalize_left();
+result.canonicalize_left();
 return result;
 }
@@ -396,7 +396,7 @@ namespace xerus {
 /**
 * @brief Move the core to a new position.
 * @details The core is moved to @a _position and the nodes between the old and the new position are orthogonalized
-* accordingly. If the TTNetwork is not yet cannonicalized it will be with @a _position as new corePosition.
+* accordingly. If the TTNetwork is not yet canonicalized it will be with @a _position as new corePosition.
 * @param _position the new core position.
 * @param _keepRank by default a rank-revealing QR decomposition is used to move the core and the ranks are reduced
 * accordingly. If @a _keepRank is set the rank is not reduced; this is needed e.g. in the ALS.
@@ -416,14 +416,14 @@ namespace xerus {
 * @brief Move the core to the left.
 * @details Basically calls move_core() with _position = 0
 */
-void cannonicalize_left();
+void canonicalize_left();
 /**
 * @brief Move the core to the right.
 * @details Basically calls move_core() with _position = degree()-1
 */
-void cannonicalize_right();
+void canonicalize_right();
 /**
@@ -450,7 +450,7 @@ namespace xerus {
 /**
 * @brief Tests whether the network resembles that of a TTTensor and checks consistency with the underlying tensor objects.
-* @details Note that this will NOT check for orthogonality of cannonicalized TTNetworks.
+* @details Note that this will NOT check for orthogonality of canonicalized TTNetworks.
 */
 virtual void require_correct_format() const override;
......
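
As a usage illustration of the renamed core-movement interface above, a hedged sketch (not part of this commit; dimensions are arbitrary):

```cpp
#include <xerus.h>

int main() {
	xerus::TTTensor x = xerus::TTTensor::random({4, 4, 4, 4}, {2, 2, 2});
	x.move_core(2);         // orthogonalizes the components on both sides of position 2
	x.canonicalize_left();  // equivalent to move_core(0)
	x.canonicalize_right(); // equivalent to move_core(degree()-1), here 3
	return 0;
}
```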
@@ -81,7 +81,7 @@ static misc::UnitTest tt_rw("TT", "read_write_file", [](){
 TTTensor Ab = misc::load_from_file<TTTensor>("test.dat");
 Index i;
 Ab.require_correct_format();
-MTEST(Ab.cannonicalized && Ab.corePosition == 0, Ab.cannonicalized << " " << Ab.corePosition);
+MTEST(Ab.canonicalized && Ab.corePosition == 0, Ab.canonicalized << " " << Ab.corePosition);
 MTEST(A.dimensions == Ab.dimensions, A.dimensions << " vs " << Ab.dimensions);
 MTEST(frob_norm(A(i&0)-Ab(i&0))/frob_norm(A) < 6e-16, frob_norm(A(i&0)-Ab(i&0))/frob_norm(A));
 });
@@ -106,7 +106,7 @@ UNIT_TEST(Strassen, CP,
 // LOG(test, "\t\t\t\t" << newMicroRes);
 while (std::abs(1-microItrRes/newMicroRes) > 1e-4) {
 microItrRes = newMicroRes;
-ttDiff.cannonicalize_left();
+ttDiff.canonicalize_left();
 tn0(i,r1) = diff(i,j,k) * tn1(r1,j,r2) * tn2(r2,k);
 tn0 /= tn0.frob_norm();
 tn1(r1,j,r2) = diff(i,j,k) * tn0(i,r1) * tn2(r2,k);
......
@@ -191,9 +191,9 @@ static misc::UnitTest tt_dyadic("TT", "dyadic_product", [](){
 TTOperator O = dyadic_product<true>({o1,o2,o3});
 TTTensor S = dyadic_product<false>({s1,s2,s3});
-TEST(O.cannonicalized);
+TEST(O.canonicalized);
 MTEST(O.corePosition == 0, O.corePosition);
-TEST(S.cannonicalized);
+TEST(S.canonicalized);
 MTEST(S.corePosition == 0, S.corePosition);
 Index i,j;
@@ -205,7 +205,7 @@ static misc::UnitTest tt_dyadic("TT", "dyadic_product", [](){
 TEST(std::abs(R - r1*r2*r3) < 1e-12);
 S = dyadic_product(S,TTTensor::ones({10})) + dyadic_product(TTTensor::ones({10}), S);
-TEST(S.cannonicalized);
+TEST(S.canonicalized);
 MTEST(S.corePosition == 0, S.corePosition);
 Tensor e0({10}, [&](const std::vector<size_t> &_idx){
@@ -216,7 +216,7 @@ static misc::UnitTest tt_dyadic("TT", "dyadic_product", [](){
 });
 S *= 1/std::sqrt(2);
 S = dyadic_product(S,TTTensor(e0)) + dyadic_product(TTTensor(e1), S);
-TEST(S.cannonicalized);
+TEST(S.canonicalized);
 MTEST(S.corePosition == 0, S.corePosition);
 });
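
The tests above exercise `dyadic_product`; a minimal standalone sketch of the same call pattern (not part of this commit; dimensions are arbitrary):

```cpp
#include <xerus.h>

int main() {
	using namespace xerus;
	// The dyadic product concatenates the factors' dimensions, here {5, 5, 10}.
	TTTensor a = TTTensor::random({5, 5}, {2});
	TTTensor b = TTTensor::ones({10});
	TTTensor c = dyadic_product(a, b);
	// As asserted in the tests above, the result is canonicalized with the core at position 0.
	return 0;
}
```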
@@ -427,7 +427,7 @@ namespace xerus {
 #pragma omp for schedule(static)
 for(size_t i = 0; i < numMeasurments; ++i) {
 contract(currentValue, *forwardStack[i + _corePosition*numMeasurments], false, *backwardStack[i + (_corePosition+1)*numMeasurments], false, 1);
-partialNormAProjGrad[position_or_zero(measurments, i, _corePosition)] += misc::sqr(currentValue[0]*measurmentNorms[i]); // TODO measurmentNorms
+partialNormAProjGrad[position_or_zero(measurments, i, _corePosition)] += misc::sqr(currentValue[0]/**measurmentNorms[i]*/); // TODO measurmentNorms
 }
 // Accumulate the partial components
@@ -448,7 +448,7 @@ namespace xerus {
 #pragma omp for schedule(static)
 for(size_t i = 0; i < numMeasurments; ++i) {
 contract(currentValue, *forwardStack[i + (_corePosition-1)*numMeasurments], false, *backwardStack[i + _corePosition*numMeasurments], false, 1);
-partialNormAProjGrad[position_or_zero(measurments, i, _corePosition)] += misc::sqr(currentValue[0]*measurmentNorms[i]); // TODO measurmentNorms
+partialNormAProjGrad[position_or_zero(measurments, i, _corePosition)] += misc::sqr(currentValue[0]/**measurmentNorms[i]*/); // TODO measurmentNorms
 }
 // Accumulate the partial components
@@ -579,7 +579,7 @@ namespace xerus {
 calc_measurment_norm(measurmentNorms.get(), measurments);
 // We need x to be canonicalized in the sense that there is no edge with more than maximal rank (prior to stack resize).
-x.cannonicalize_left();
+x.canonicalize_left();
 resize_stack_tensors();
......
@@ -98,7 +98,7 @@ namespace xerus {
 /**
 * @brief Finds the range of nodes that need to be optimized and orthogonalizes @a _x properly
 * @details finds full-rank nodes (these can wlog be set to identity and need not be optimized)
-* requires cannonicalizeAtTheEnd and corePosAtTheEnd to be set
+* requires canonicalizeAtTheEnd and corePosAtTheEnd to be set
 * sets optimizedRange
 * modifies x
 */
@@ -168,10 +168,10 @@ namespace xerus {
 dimensionProd = newDimensionProd;
 }
-if (cannonicalizeAtTheEnd && corePosAtTheEnd < firstOptimizedIndex) {
+if (canonicalizeAtTheEnd && corePosAtTheEnd < firstOptimizedIndex) {
 x.assume_core_position(firstOptimizedIndex);
 } else {
-if (cannonicalizeAtTheEnd && corePosAtTheEnd >= firstNotOptimizedIndex) {
+if (canonicalizeAtTheEnd && corePosAtTheEnd >= firstNotOptimizedIndex) {
 x.assume_core_position(firstNotOptimizedIndex-1);
 }
@@ -323,7 +323,7 @@ namespace xerus {
 : ALS(_ALS), A(_A), x(_x), b(_b)
 , targetRank(_x.ranks())
 , normB(frob_norm(_b))
-, cannonicalizeAtTheEnd(_x.cannonicalized)
+, canonicalizeAtTheEnd(_x.canonicalized)
 , corePosAtTheEnd(_x.corePosition)
 , lastEnergy2(1e102)
 , lastEnergy(1e101)
@@ -454,7 +454,7 @@ namespace xerus {
 {
 // we are done! yay
 LOG(ALS, "ALS done, " << _data.energy << " " << _data.lastEnergy << " " << std::abs(_data.lastEnergy2-_data.energy) << " " << std::abs(_data.lastEnergy-_data.energy) << " < " << _convergenceEpsilon);
-if (_data.cannonicalizeAtTheEnd && preserveCorePosition) {
+if (_data.canonicalizeAtTheEnd && preserveCorePosition) {
 _data.x.move_core(_data.corePosAtTheEnd, true);
 }
 return true;
......
@@ -80,7 +80,7 @@ namespace xerus {
 }
 TTTangentVector::TTTangentVector(const TTTensor& _base, const TTTensor& _direction) {
-REQUIRE(_base.cannonicalized && _base.corePosition == 0, "projection onto tangent plane is only implemented for core position 0 at the moment");
+REQUIRE(_base.canonicalized && _base.corePosition == 0, "projection onto tangent plane is only implemented for core position 0 at the moment");
 REQUIRE(_base.dimensions == _direction.dimensions, "");
 baseL = _base;
@@ -281,7 +281,7 @@ namespace xerus {
 // TODO do this without creating the change_direction tensor?
 void ProjectiveVectorTransport(const TTTensor &_newBase, TTTangentVector &_tangentVector) {
-REQUIRE(_newBase.cannonicalized && _newBase.corePosition == 0, "Tangent vectors only implemented for core position 0 atm");
+REQUIRE(_newBase.canonicalized && _newBase.corePosition == 0, "Tangent vectors only implemented for core position 0 atm");
 _tangentVector = TTTangentVector(_newBase, TTTensor(_tangentVector));
 }
......
@@ -542,7 +542,7 @@ BOOST_PYTHON_MODULE(xerus) {
 .def(init<const TTTensor &>())
 .def("get_component", &TTTensor::get_component, return_value_policy<copy_const_reference>())
 .def("set_component", &TTTensor::set_component)
-.def_readonly("cannonicalized", &TTTensor::cannonicalized)
+.def_readonly("canonicalized", &TTTensor::canonicalized)
 .def_readonly("corePosition", &TTTensor::corePosition)
 .def("ranks", &TTTensor::ranks)
 .def("rank", &TTTensor::rank)
@@ -565,11 +565,11 @@ BOOST_PYTHON_MODULE(xerus) {
 return boost::python::make_tuple(result.first, result.second);
 }, arg("position"))
-.def("round", static_cast<void (TTTensor::*)(const std::vector<size_t>&, double)>(&TTTensor::round),
-(arg("ranks"), arg("epsilon")=EPSILON)
-)
+// .def("round", static_cast<void (TTTensor::*)(const std::vector<size_t>&, double)>(&TTTensor::round),
+// (arg("ranks"), arg("epsilon")=EPSILON)
+// )
 .def("round", static_cast<void (TTTensor::*)(double)>(&TTTensor::round))
-.def("round", static_cast<void (TTTensor::*)(size_t)>(&TTTensor::round))
+// .def("round", static_cast<void (TTTensor::*)(size_t)>(&TTTensor::round))
 .def("soft_threshold", static_cast<void (TTTensor::*)(const double, const bool)>(&TTTensor::soft_threshold),
 (arg("tau"), arg("preventZero")=false)
@@ -583,8 +583,8 @@ BOOST_PYTHON_MODULE(xerus) {
 )
 .def("assume_core_position", &TTTensor::assume_core_position)
-.def("cannonicalize_left", &TTTensor::cannonicalize_left)
-.def("cannonicalize_right", &TTTensor::cannonicalize_right)
+.def("canonicalize_left", &TTTensor::canonicalize_left)
+.def("canonicalize_right", &TTTensor::canonicalize_right)
 .def(self + self)
 .def(self - self)
 .def(self * other<value_t>())
@@ -606,7 +606,7 @@ BOOST_PYTHON_MODULE(xerus) {
 .def(init<const TTOperator &>())
 .def("get_component", &TTOperator::get_component, return_value_policy<copy_const_reference>())
 .def("set_component", &TTOperator::set_component)
-.def_readonly("cannonicalized", &TTOperator::cannonicalized)
+.def_readonly("canonicalized", &TTOperator::canonicalized)
 .def_readonly("corePosition", &TTOperator::corePosition)
 .def("ranks", &TTOperator::ranks)
 .def("rank", &TTOperator::rank)
@@ -647,8 +647,8 @@ BOOST_PYTHON_MODULE(xerus) {
 )
 .def("assume_core_position", &TTOperator::assume_core_position)
-.def("cannonicalize_left", &TTOperator::cannonicalize_left)
-.def("cannonicalize_right", &TTOperator::cannonicalize_right)
+.def("canonicalize_left", &TTOperator::canonicalize_left)
+.def("canonicalize_right", &TTOperator::canonicalize_right)
 .def(self + self)
 .def(self - self)
 .def(self += self)
......
@@ -161,10 +161,10 @@ namespace xerus {
 TTNetwork<isOperator> result;
 static_cast<TensorNetwork&>(result) = static_cast<TensorNetwork&>(*this);
 if(cannonicalization_required) {
-result.cannonicalized = false;
+result.canonicalized = false;
 result.move_core(futureCorePosition);
 } else {
-result.cannonicalized = true;
+result.canonicalized = true;
 result.corePosition = futureCorePosition;
 }
 result.require_correct_format();
......