Commit ff68be30 authored by Sebastian Wolf

Merge branch 'development' of git:xerus/xerus into development

parents b10a5c3f 47ed838a
Pipeline #1254 passed with stages in 17 minutes and 55 seconds
@@ -27,6 +27,42 @@ E.g. to install all dependencies on a fedora system execute
dnf install gcc-c++ openblas-devel suitesparse-devel lapack-devel boost-devel binutils-devel
~~~
Or, to install all dependencies on Ubuntu (tested on 18.04), execute:
~~~ bash
apt-get update
apt install make g++ libboost-all-dev binutils-dev libsuitesparse-dev libz-dev libiberty-dev libopenblas-dev liblapack-dev liblapacke-dev gfortran
~~~
To build the optional ARPACK extension, you need to install ARPACK and its C++ headers by hand. First clone the git project to an appropriate place, enter the project directory, configure the sources, and build the library:
~~~ bash
git clone https://github.com/opencollab/arpack-ng.git
cd arpack-ng
apt-get install autoconf
sh bootstrap
./configure F77=gfortran --enable-icb
make
make check
~~~
You can specify a custom installation path by passing --prefix=<installation-path> to the configure command.
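For example (the prefix path below is purely illustrative), a configure call installing into a user-local directory might look like:
~~~ bash
./configure F77=gfortran --enable-icb --prefix=$HOME/.local
~~~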
Unfortunately, the C wrapper is broken. You need to modify the file arpack.hpp:
~~~ bash
cd ICB/
vi arpack.hpp
~~~
In arpack.hpp, comment out all four occurrences of the function neupd.
After that, install the package by typing:
~~~ bash
make install
~~~
Once ARPACK is installed, you need to modify the xerus config file: simply remove the comment signs (#) in front of ARPACK_LIBRARIES and of the additional flag below it:
~~~ bash
# (optional) Uncomment if needed, iterative eigenvalue solver ARPACK-ng, see https://github.com/opencollab/arpack-ng
ARPACK_LIBRARIES = -larpack
OTHER += -DARPACK_LIBRARIES
~~~
To build the python bindings you will furthermore need the python development headers and `numpy`, as well as `boost-python` or `boost-python3`, depending on the python version you wish to use. E.g. on a fedora system, if you want to use python 2, simply execute
~~~ bash
......
@@ -56,26 +56,89 @@ namespace xerus {
* a seriously low level implementation of a critical part of an algorithm is required.
*/
namespace arpackWrapper {
///@brief: Solves Ax = lambda*x for x, this calls the Arpack Routine dsaupd
/**
* @brief: Solves Ax = lambda*x for x; this calls the ARPACK routine dsaupd, i.e. it executes the Lanczos method
* @param _x on input may contain a start vector for the iterative scheme, on output it contains the computed eigenvector(s)
* @param _A operator data as a double array
* @param _ev on output contains the requested eigenvalue(s)
* @param _k number of requested eigenvalues
* @param _n dimension of the operator
* @param _resid work array for the algorithm
* @param _maxiter maximal number of iterations; when this number is reached the routine exits with an error!
* @param _eps accuracy of the eigenvalues, strongly influences the number of iterations needed
* @param _ritz_option specifies whether the smallest or the largest eigenvalues are wanted; possible values are arpack::which::smallest_algebraic and arpack::which::largest_algebraic
* @param _info specifies whether _x should be used as start vector or whether the algorithm should be initialized randomly by the routine
*/
void solve_ev(double* const _x, const double* const _A, double* const _ev, const size_t _k, const size_t _n, double* const _resid, const size_t _maxiter, const double _eps, arpack::which const _ritz_option, int _info);
///@brief: Solves Ax = lambda*x for x, for the smallest _k eigenvalues
/**
* @brief: Solves Ax = lambda*x for x for the _k smallest eigenvalues
* @param _x on input may contain a start vector for the iterative scheme, on output it contains the computed eigenvector(s)
* @param _A operator data as a double array
* @param _ev on output contains the requested eigenvalue(s)
* @param _k number of requested eigenvalues
* @param _n dimension of the operator
* @param _resid work array for the algorithm
* @param _maxiter maximal number of iterations; when this number is reached the routine exits with an error!
* @param _eps accuracy of the eigenvalues, strongly influences the number of iterations needed
* @param _info specifies whether _x should be used as start vector or whether the algorithm should be initialized randomly by the routine
*/
void solve_ev_smallest(double* const _x, const double* const _A, double* const _ev, const size_t _k, const size_t _n, double* const _resid, const size_t _maxiter, const double _eps, int _info);
///@brief: Solves Ax = lambda*x for x, for the largest _k eigenvalues
/**
* @brief: Solves Ax = lambda*x for x for the _k largest eigenvalues
* @param _x on input may contain a start vector for the iterative scheme, on output it contains the computed eigenvector(s)
* @param _A operator data as a double array
* @param _ev on output contains the requested eigenvalue(s)
* @param _k number of requested eigenvalues
* @param _n dimension of the operator
* @param _resid work array for the algorithm
* @param _maxiter maximal number of iterations; when this number is reached the routine exits with an error!
* @param _eps accuracy of the eigenvalues, strongly influences the number of iterations needed
* @param _info specifies whether _x should be used as start vector or whether the algorithm should be initialized randomly by the routine
*/
void solve_ev_largest(double* const _x, const double* const _A, double* const _ev, const size_t _k, const size_t _n, double* const _resid, const size_t _maxiter, const double _eps, int _info);
//TODO check if this can be simplified!!
///@brief: Solves Ax = lambda*x for x, this calls the Arpack Routine dsaupd
/**
* @brief: Solves Ax = lambda*x for x; this calls the ARPACK routine dsaupd, i.e. it executes the Lanczos method
* @param _x on input may contain a start vector for the iterative scheme, on output it contains the computed eigenvector(s)
* @param _op operator data given as a TensorNetwork
* @param _ev on output contains the requested eigenvalue(s)
* @param _k number of requested eigenvalues
* @param _n dimension of the operator
* @param _resid work array for the algorithm
* @param _maxiter maximal number of iterations; when this number is reached the routine exits with an error!
* @param _eps accuracy of the eigenvalues, strongly influences the number of iterations needed
* @param _ritz_option specifies whether the smallest or the largest eigenvalues are wanted; possible values are arpack::which::smallest_algebraic and arpack::which::largest_algebraic
* @param _info specifies whether _x should be used as start vector or whether the algorithm should be initialized randomly by the routine
*/
void solve_ev_special(double* const _x, const TensorNetwork& _op, double* const _ev, const size_t _k, const size_t _n, double* const _resid, const size_t _maxiter, const double _eps, arpack::which const _ritz_option, int _info);
///@brief: Solves Ax = lambda*x for x, for the smallest _k eigenvalues, takes Tensor Network as Operator
/**
* @brief: Solves Ax = lambda*x for x for the _k smallest eigenvalues
* @param _x on input may contain a start vector for the iterative scheme, on output it contains the computed eigenvector(s)
* @param _op operator data given as a TensorNetwork
* @param _ev on output contains the requested eigenvalue(s)
* @param _k number of requested eigenvalues
* @param _n dimension of the operator
* @param _resid work array for the algorithm
* @param _maxiter maximal number of iterations; when this number is reached the routine exits with an error!
* @param _eps accuracy of the eigenvalues, strongly influences the number of iterations needed
* @param _info specifies whether _x should be used as start vector or whether the algorithm should be initialized randomly by the routine
*/
void solve_ev_smallest_special(double* const _x, const TensorNetwork& _op, double* const _ev, const size_t _k, const size_t _n, double* const _resid, const size_t _maxiter, const double _eps, int _info);
///@brief: Solves Ax = lambda*x for x, for the largest _k eigenvalues, takes a TensorNetwork as operator
/**
* @brief: Solves Ax = lambda*x for x for the _k largest eigenvalues
* @param _x on input may contain a start vector for the iterative scheme, on output it contains the computed eigenvector(s)
* @param _op operator data given as a TensorNetwork
* @param _ev on output contains the requested eigenvalue(s)
* @param _k number of requested eigenvalues
* @param _n dimension of the operator
* @param _resid work array for the algorithm
* @param _maxiter maximal number of iterations; when this number is reached the routine exits with an error!
* @param _eps accuracy of the eigenvalues, strongly influences the number of iterations needed
* @param _info specifies whether _x should be used as start vector or whether the algorithm should be initialized randomly by the routine
*/
void solve_ev_largest_special(double* const _x, const TensorNetwork& _op, double* const _ev, const size_t _k, const size_t _n, double* const _resid, const size_t _maxiter, const double _eps, int _info);
}
}
#endif
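For orientation, here is a minimal sketch of how the dense `solve_ev_smallest` routine declared above might be called. The buffer sizes (`_k*_n` doubles for `_x`, `_k` for `_ev`, `_n` for `_resid`) and the use of `_info = 0` to request a random start vector are read off the parameter descriptions and the usual ARPACK conventions, and it is assumed that including `xerus.h` in a build with the ARPACK extension enabled provides the declaration; treat this as an illustration rather than reference documentation.
~~~ cpp
#include <cstddef>
#include <iostream>
#include <vector>
#include <xerus.h>  // assumed to provide xerus::arpackWrapper when built with ARPACK support

int main() {
	const size_t n = 3;  // dimension of the operator
	const size_t k = 1;  // number of requested eigenvalues

	// Small symmetric test matrix (1D discrete Laplacian) stored as a plain double array.
	std::vector<double> A = {
		 2.0, -1.0,  0.0,
		-1.0,  2.0, -1.0,
		 0.0, -1.0,  2.0
	};

	std::vector<double> x(k * n, 0.0);   // eigenvector output (assumed: k vectors of length n)
	std::vector<double> ev(k, 0.0);      // eigenvalue output
	std::vector<double> resid(n, 0.0);   // work array for the algorithm
	int info = 0;                        // 0: let the routine pick a random start vector

	// Smallest eigenpair, at most 1000 iterations, tolerance 1e-10.
	xerus::arpackWrapper::solve_ev_smallest(x.data(), A.data(), ev.data(), k, n,
	                                        resid.data(), 1000, 1e-10, info);

	std::cout << "smallest eigenvalue: " << ev[0] << std::endl;
	return 0;
}
~~~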
......
@@ -168,6 +168,7 @@ namespace xerus {
}
dimensions = _tensor.dimensions;
Tensor remains;
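// Distribute the global tolerance _eps over the individual SVD truncations: following the error analysis in
// L. Grasedyck, "Hierarchical Singular Value Decomposition of Tensors", the per-truncation errors accumulate
// at most by a factor of sqrt(2*numComp-3), so using _eps / sqrt(2*numComp-3) per site keeps the overall error below _eps.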
auto epsPerSite = numComp < 2 ? _eps : misc::hard_equal(_eps, 0.0) ? EPSILON : _eps / std::sqrt(double(2*numComp-3));
@@ -213,7 +214,8 @@ namespace xerus {
xerus::reshuffle(remains, remains, ithmode);
calculate_svd(newNode, singularValues, remains, remains, N, _maxRanks[pos - 1], _eps);
calculate_svd(newNode, singularValues, remains, remains, N, _maxRanks[pos - 1], epsPerSite);
if (isOperator){
xerus::reshuffle(newNode, newNode, {1,2,0});
} else {
@@ -247,7 +249,7 @@ namespace xerus {
}
xerus::reshuffle(remains, remains, ithmode);
calculate_svd(newNode, singularValues, remains, remains, 2, _maxRanks[numCompOnLvl + pos - 2], _eps); // TODO fix maxRanks
calculate_svd(newNode, singularValues, remains, remains, 2, _maxRanks[numCompOnLvl + pos - 2], epsPerSite); // TODO fix maxRanks
xerus::reshuffle(newNode, newNode, {1,2,0}); // first parent then children
set_component(numCompOnLvl + pos - 1, std::move(newNode));
newNode.reset();
@@ -727,8 +729,12 @@ namespace xerus {
canonicalize_root();
for (size_t n = numComponents - 1; n > 0; --n) {
round_edge(n, (n + 1) / 2 - 1, _maxRanks[n - 1], _eps, 0.0);
if(numComponents > 1) {
auto epsPerSite = misc::hard_equal(_eps, 0.0) ? EPSILON : _eps / std::sqrt(double(2*numComponents-3)); // Taken from "Hierarchical Singular Value Decomposition of Tensors" by Lars Grasedyck
for (size_t n = numComponents - 1; n > 0; --n) {
round_edge(n, (n + 1) / 2 - 1, _maxRanks[n - 1], epsPerSite, 0.0);
}
}
assume_core_position(0);
......