Commit 6760be36 authored by Ben Huber

added Tensor.one_norm() and changed residual in ALS to be relative to norm of rhs

parent 4112646f
Pipeline #713 failed with stages in 7 minutes and 43 seconds
......@@ -10,7 +10,8 @@ Potentially breaking changes are marked with an exclamation point '!' at the beg
* ! Tensor::modify_diag_elements renamed to Tensor::modify_diagonal_entries for naming consistency.
* Much faster solve of matrix equations Ax=b by exploiting symmetry and definiteness where possible. This directly speeds up the ALS as well.
* Added a highly optimized minimal version of the ALS algorithm as xALS.
* Some minor bugfixes.
* Added Tensor.one_norm() and one_norm(Tensor) to calculate the one norm of a Tensor.
* Some minor bugfixes and performance improvements.
* 2016-06-23 v2.4.0
* Introduced nomenclature 'mode'. Marked all functions that will be renamed / removed in v3.0.0 as deprecated.
......
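For reference (an editorial note, not part of the commit): the one norm added here is the entrywise 1-norm of the tensor, i.e. the sum of the absolute values of all entries,
~~~
\|A\|_1 = \sum_i |a_i|
~~~
as implemented below via `cblas_dasum` for dense data and a loop over the nonzero entries for sparse data. It is the norm of the vectorization, not the induced operator 1-norm.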
......@@ -308,6 +308,18 @@ A = xerus.entrywise_product(A, A)
## Output and Storing
In the above examples we have already seen two kinds of queries to the `Tensor` objects: [.to_string()](\ref xerus::Tensor::to_string())
to obtain a human-readable string representation of the Tensor, and [.frob_norm()](\ref xerus::Tensor::frob_norm()) or equivalently
[frob_norm(Tensor)](\ref xerus::frob_norm(Tensor)) to obtain the Frobenius norm of a Tensor. Note that the former is
meant purely for debugging purposes, in particular of smaller objects; it is in general not possible to reconstruct the original
Tensor from this output.
Storing Tensors to files, such that they can be reconstructed exactly, is instead possible with [save_to_file()](\ref xerus::misc::save_to_file())
and [load_from_file()](\ref xerus::misc::load_from_file()) respectively.
~~~.cpp
// a minimal sketch (editorial, not part of the original commit); the filename is arbitrary
xerus::misc::save_to_file(A, "tensor.dat");
xerus::Tensor B = xerus::misc::load_from_file<xerus::Tensor>("tensor.dat");
// B is now an exact copy of A
~~~
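Analogously, the query functions mentioned above can be used as follows (a minimal sketch; `A` denotes any previously constructed Tensor):
~~~.cpp
std::cout << A.to_string() << std::endl;  // human-readable, for debugging only
std::cout << A.frob_norm() << std::endl;  // Frobenius norm via member function
std::cout << frob_norm(A) << std::endl;   // equivalent free function
~~~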
## Advanced Use and Ownership of Data
......
......@@ -38,7 +38,10 @@ namespace xerus {
//----------------------------------------------- LEVEL I BLAS ----------------------------------------------------------
///@brief: Computes the two norm =||x||
///@brief: Computes the one norm =||x||_1
double one_norm(const double* const _x, const size_t _n);
///@brief: Computes the two norm =||x||_2
double two_norm(const double* const _x, const size_t _n);
///@brief: Computes the dot product = x^T*y
......
......@@ -153,6 +153,8 @@ namespace xerus {
template<class tensor_type>
value_t frob_norm(const IndexedTensorReadOnly<tensor_type>& _idxTensor);
///@brief Returns the one-norm of the associated tensor object.
value_t one_norm(const IndexedTensorReadOnly<Tensor>& _idxTensor);
size_t get_eval_degree(const std::vector<Index>& _indices);
}
......
......@@ -436,6 +436,12 @@ namespace xerus {
*/
value_t frob_norm() const;
/**
* @brief Calculates the 1-norm of the tensor.
* @return the 1-norm.
*/
value_t one_norm() const;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - Basic arithmetics - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/**
......@@ -923,6 +929,13 @@ namespace xerus {
*/
static XERUS_force_inline value_t frob_norm(const Tensor& _tensor) { return _tensor.frob_norm(); }
/**
* @brief Calculates the 1-norm of the given tensor.
* @param _tensor the Tensor of which the norm shall be calculated.
* @return the 1-norm.
*/
static XERUS_force_inline value_t one_norm(const Tensor& _tensor) { return _tensor.one_norm(); }
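Both entry points introduced by this commit can be exercised as follows (a minimal editorial sketch assuming a built xerus; `#include <xerus.h>` is the library's convenience header):
~~~.cpp
#include <xerus.h>
#include <iostream>

int main() {
	// 3x3 all-ones tensor: the 1-norm sums the absolute values of all nine entries
	xerus::Tensor A = xerus::Tensor::ones({3, 3});
	std::cout << A.one_norm() << std::endl;        // member version: 9
	std::cout << xerus::one_norm(A) << std::endl;  // free-function wrapper: 9
	std::cout << xerus::frob_norm(A) << std::endl; // Frobenius norm: sqrt(9) = 3
	return 0;
}
~~~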
/**
* @brief Low-Level SVD calculation of a given Tensor @a _input = @a _U @a _S @a _Vt.
* @param _U Output Tensor for the resulting U.
......
......@@ -403,7 +403,6 @@ namespace xerus {
* @details Checks whether all links in the network are set consistently and matching the
underlying tensor objects. This also checks whether the additional constraints of the specific
format (if any) are fulfilled.
* @return TRUE if the sanity check passes. If not an exception is thrown.
*/
virtual void require_correct_format() const;
......@@ -467,11 +466,6 @@ namespace xerus {
*/
virtual void fix_mode(const size_t _mode, const size_t _slatePosition);
__attribute__((deprecated("function has been renamed. please use 'fix_mode'")))
void fix_slate(const size_t _dimPos, const size_t _slatePosition) {
fix_mode(_dimPos, _slatePosition);
}
/**
* @brief removes the given @a _slatePosition from the @a _mode. this reduces the given dimension by one
*/
......@@ -487,11 +481,6 @@ namespace xerus {
*/
virtual void resize_mode(const size_t _mode, const size_t _newDim, const size_t _cutPos=~0ul);
__attribute__((deprecated("function has been renamed. please use 'resize_mode'")))
void resize_dimension(const size_t _mode, const size_t _newDim, const size_t _cutPos=~0ul) {
resize_mode(_mode, _newDim, _cutPos);
}
/**
* @brief Contracts the nodes with indices @a _nodeId1 and @a _nodeId2.
* @details Replaces @a _nodeId1 with the contraction and erases @a _nodeId2.
......@@ -541,33 +530,33 @@ namespace xerus {
TensorNetwork operator/(TensorNetwork &_lhs, value_t _factor);
/**
* @brief Calculates the Frobenius norm of the given TensorNetwork.
* @param _network the TensorNetwork of which the Frobenius norm shall be calculated.
* @return the Frobenius norm.
*/
static XERUS_force_inline value_t frob_norm(const TensorNetwork& _network) { return _network.frob_norm(); }
/**
* @brief Checks whether two TensorNetworks are approximately equal.
* @details Checks whether ||@a _a - @a _b ||/(||@a _a ||/2 + ||@a _b ||/2) < @a _eps, i.e. whether the relative difference in the Frobenius norm is sufficiently small.
* @param _a the first test candidate.
* @param _b the second test candidate.
* @param _eps the maximal relative difference between @a _a and @a _b.
* @return TRUE if @a _a and @a _b are determined to be approximately equal, FALSE otherwise.
*/
bool approx_equal(const TensorNetwork& _a, const TensorNetwork& _b, const value_t _eps = EPSILON);
/**
* @brief Convenience wrapper, casts the given TensorNetwork @a _a to Tensor and calls the Tensor function.
*/
bool approx_equal(const TensorNetwork& _a, const Tensor& _b, const value_t _eps = EPSILON);
/**
* @brief Convenience wrapper, casts the given TensorNetwork @a _b to Tensor and calls the Tensor function.
*/
bool approx_equal(const Tensor& _a, const TensorNetwork& _b, const value_t _eps = EPSILON);
......
......@@ -78,8 +78,8 @@ static misc::UnitTest als_real("ALS", "real", []() {
b(k&0) = A(k/2,l/2)*realX(l&0);
const value_t result = ALS(A, x, b, 1e-6);
MTEST(result < 0.001, result);
const value_t result = ALS(A, x, b, 1e-7);
MTEST(result < 1e-7, result);
MTEST(frob_norm(x - realX) < 1e-4, frob_norm(x - realX));
});
......@@ -103,7 +103,7 @@ static misc::UnitTest als_proj("ALS", "projectionALS", [](){
#include <iomanip>
#include <fstream>
/*
static misc::UnitTest als_tut("ALS", "tutorial", [](){
xerus::Index i,j,k;
......@@ -159,4 +159,4 @@ static misc::UnitTest als_tut("ALS", "tutorial", [](){
// ALSb(A, X, B, 1e-4, &perfdata);
// TEST(!misc::approx_equal(frob_norm(A(i^d, j^d)*X(j&0) - B(i&0)), 0., 1.));
// std::cout << perfdata << std::endl;
});
});*/
......@@ -56,6 +56,19 @@ static Tensor::DimensionTuple random_dimensions(const size_t _degree, const size
}
static misc::UnitTest tensor_norms("Tensor", "one_norm", [](){
Index i;
Tensor A = Tensor::ones({100,100});
// all-ones 100x100 tensor: one_norm = 100*100 = 10000, frob_norm = sqrt(100*100) = 100
MTEST(misc::approx_equal(A.one_norm(), 100.0*100), A.one_norm());
MTEST(misc::approx_equal(one_norm(A), 100.0*100), A.one_norm());
MTEST(misc::approx_equal(one_norm(A(i&0)), 100.0*100), A.one_norm());
MTEST(misc::approx_equal(A.frob_norm(), 100.0), A.frob_norm());
A = Tensor::identity({100,100});
// 100x100 identity: one_norm = 100, frob_norm = sqrt(100) = 10
MTEST(misc::approx_equal(A.one_norm(), 100.0), A.one_norm());
});
static misc::UnitTest tensor_rand_ortho("Tensor", "random_orthogonal", [](){
Index i,j,k;
Tensor Q = Tensor::random_orthogonal({3,15}, {6,7});
......
......@@ -257,7 +257,7 @@ namespace xerus {
if (ALS.assumeSPD) {
residual_f = [&](){
Index n1, n2;
return frob_norm((*A)(n1/2,n2/2)*x(n2&0) - b(n1&0));
return frob_norm((*A)(n1/2,n2/2)*x(n2&0) - b(n1&0))/normB;
};
if (ALS.useResidualForEndCriterion) {
energy_f = residual_f;
......@@ -291,7 +291,7 @@ namespace xerus {
}
res() = xAtAx(r1&0) * localOperatorCache.right.back()(r1&0)
- 2 * bAx(r1&0) * rhsCache.right.back()(r1&0);
return res[0] + misc::sqr(normB);
return std::sqrt(res[0] + misc::sqr(normB))/normB;
};
energy_f = residual_f;
}
......
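For context on this change (an editorial note, not part of the diff): in the non-SPD branch above, the residual is assembled from cached contractions using the identity
~~~
\|Ax - b\|^2 = x^T A^T A x - 2\, b^T A x + \|b\|^2
~~~
so `res[0]` carries the first two terms; taking the square root of `res[0] + misc::sqr(normB)` and dividing by `normB` yields the residual relative to the norm of the right-hand side, consistent with the SPD branch.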
......@@ -73,6 +73,18 @@ namespace xerus {
//----------------------------------------------- LEVEL I BLAS ----------------------------------------------------------
double one_norm(const double* const _x, const size_t _n) {
REQUIRE(_n <= static_cast<size_t>(std::numeric_limits<int>::max()), "Dimension too large for BLAS/Lapack");
XERUS_PA_START;
// dasum returns the sum of the absolute values of the entries, i.e. the entrywise 1-norm
const double result = cblas_dasum(static_cast<int>(_n), _x, 1);
XERUS_PA_END("Dense BLAS", "One Norm", misc::to_string(_n));
return result;
}
double two_norm(const double* const _x, const size_t _n) {
REQUIRE(_n <= static_cast<size_t>(std::numeric_limits<int>::max()), "Dimension too large for BLAS/Lapack");
......
......@@ -334,6 +334,10 @@ namespace xerus {
template value_t frob_norm<Tensor>(const IndexedTensorReadOnly<Tensor>& _idxTensor);
template value_t frob_norm<TensorNetwork>(const IndexedTensorReadOnly<TensorNetwork>& _idxTensor);
value_t one_norm(const IndexedTensorReadOnly<Tensor>& _idxTensor) {
return _idxTensor.tensorObjectReadOnly->one_norm();
}
size_t get_eval_degree(const std::vector<Index>& _indices) {
size_t degree = 0;
for(const Index& idx : _indices) {
......
......@@ -414,6 +414,7 @@ BOOST_PYTHON_MODULE(xerus) {
.def("degree", &Tensor::degree)
.def_readonly("factor", &Tensor::factor)
.def_readonly("size", &Tensor::size)
.def("one_norm", &Tensor::one_norm)
.def("frob_norm", &Tensor::frob_norm)
.def("random",
+[](std::vector<size_t> _dim) {
......@@ -543,6 +544,8 @@ BOOST_PYTHON_MODULE(xerus) {
;
} // close Tensor_scope
variable_argument_member_to_tuple_wrapper("Tensor.__call__", "TensorCallOperator");
//def("frob_norm", +[](const Tensor& _x){ return _x.frob_norm(); }); // see below
def("one_norm", +[](const Tensor& _x){ return _x.one_norm(); });
def("reshuffle", static_cast<Tensor(*)(const Tensor&, const std::vector<size_t>&)>(&reshuffle));
def("contract", static_cast<Tensor(*)(const Tensor&, bool, const Tensor&, bool, size_t)>(&contract));
def("pseudo_inverse", static_cast<Tensor(*)(const Tensor&, size_t)>(&pseudo_inverse));
......
......@@ -272,16 +272,26 @@ namespace xerus {
}
value_t Tensor::one_norm() const {
if(is_dense()) {
return std::abs(factor)*blasWrapper::one_norm(denseData.get(), size);
}
value_t norm = 0;
for(const auto& entry : *sparseData) {
norm += std::abs(entry.second);
}
return std::abs(factor)*norm;
}
value_t Tensor::frob_norm() const {
if(is_dense()) {
return std::abs(factor)*blasWrapper::two_norm(denseData.get(), size);
}
value_t norm = 0;
for(const auto& entry : *sparseData) {
norm += misc::sqr(entry.second);
}
return std::abs(factor)*sqrt(norm);
}
......