Commit cba24b4e authored by Fuchsi*'s avatar Fuchsi*

updated build instructions

parent d000dfd7
......@@ -25,9 +25,7 @@ int main() {
std::cout << "ttA ranks: " << ttA.ranks() << std::endl;
// the right hand side of the equation both as Tensor and in (Q)TT format
xerus::Tensor b({512}, []() {
return 1.0;
});
auto b = xerus::Tensor::ones({512});
b.reinterpret_dimensions(std::vector<size_t>(9, 2));
xerus::TTTensor ttb(b);
......@@ -51,8 +49,5 @@ int main() {
x(j^9) = b(i^9) / A(i^9, j^9);
// and calculate the Frobenius norm of the difference
// here i&0 denotes a multiindex large enough to fully index the respective tensors
// the subtraction of different formats will default to Tensor subtraction such that
// the TTTensor ttx will be evaluated to a Tensor prior to subtraction.
std::cout << "error: " << frob_norm(x(i&0) - ttx(i&0)) << std::endl;
std::cout << "error: " << frob_norm(x - xerus::Tensor(ttx)) << std::endl;
}
......@@ -3,13 +3,13 @@ import xerus as xe
# construct the stiffness matrix A using a fill function
def A_fill(idx):
if idx[0] == idx[1] :
return 2
return 2.0
elif idx[1] == idx[0]+1 or idx[1]+1 == idx[0] :
return -1
else
return 0
return -1.0
else:
return 0.0
A = xe.Tensor([512,512], A_fill)
A = xe.Tensor.from_function([512,512], A_fill)
# and dividing it by h^2 = multiplying it with N^2
A *= 512*512
......@@ -17,13 +17,13 @@ A *= 512*512
# reinterpret the 512x512 tensor as a 2^18 tensor
# and create (Q)TT decomposition of it
A.reinterpret_dimensions([2,]*18)
ttA = xe.TTTensor(A)
ttA = xe.TTOperator(A)
# and verify its rank
print("ttA ranks:", ttA.ranks())
# the right hand side of the equation both as Tensor and in (Q)TT format
b = xe.Tensor([512], lambda: 1)
b = xe.Tensor.ones([512])
b.reinterpret_dimensions([2,]*9)
ttb = xe.TTTensor(b)
......@@ -43,11 +43,8 @@ print("residual:", residual)
# as a comparison, solve the system exactly using the Tensor / operator
x = xe.Tensor()
x(j^9) = b(i^9) / A(i^9, j^9)
x(j^9) << b(i^9) / A(i^9, j^9)
# and calculate the Frobenius norm of the difference
# here i&0 denotes a multiindex large enough to fully index the respective tensors
# the subtraction of different formats will default to Tensor subtraction such that
# the TTTensor ttx will be evaluated to a Tensor prior to subtraction.
print("error:", xe.frob_norm(x(i&0) - ttx(i&0)))
print("error:", xe.frob_norm(x - xe.Tensor(ttx)))
......@@ -42,7 +42,7 @@ def A_fill(idx):
return 2
elif idx[1] == idx[0]+1 or idx[1]+1 == idx[0] :
return -1
else
else :
return 0
A = xerus.Tensor([512,512], A_fill)
......@@ -72,7 +72,7 @@ xerus::TTOperator ttA(A);
__tabsMid
~~~ python
A.reinterpret_dimensions([2,]*18)
ttA = xerus.TTTensor(A)
ttA = xerus.TTOperator(A)
~~~
__tabsEnd
......@@ -95,16 +95,14 @@ As the generating function needs no index information, we create a `[]()->double
__tabsStart
~~~ cpp
xerus::Tensor b({512}, []() {
return 1.0;
});
auto b = xerus::Tensor::ones({512});
b.reinterpret_dimensions(std::vector<size_t>(9, 2));
xerus::TTTensor ttb(b);
~~~
__tabsMid
~~~ python
b = xerus.Tensor([512], lambda: 1)
b = xerus.Tensor.ones([512])
b.reinterpret_dimensions([2,]*9)
ttb = xerus.TTTensor(b)
......@@ -178,21 +176,20 @@ x(j^9) = b(i^9) / A(i^9, j^9);
__tabsMid
~~~ python
x = xerus.Tensor()
x(j^9) = b(i^9) / A(i^9, j^9)
x(j^9) << b(i^9) / A(i^9, j^9)
~~~
__tabsEnd
In the comparison of this exact solution `x` and the ALS solution `ttx` the TTTensor will automatically be
cast to a Tensor object to allow the subtraction. Here we can use another index shorthand: `i&0` which denotes
a multiindex of large enough dimension to fully index the respective tensor object.
In the comparison of this exact solution `x` and the ALS solution `ttx`, we have to decide whether we want to cast
the TTTensor to a Tensor or vice versa to be able to subtract them.
__tabsStart
~~~ cpp
std::cout << "error: " << frob_norm(x(i&0) - ttx(i&0)) << std::endl;
std::cout << "error: " << frob_norm(x - xerus::Tensor(ttx)) << std::endl;
~~~
__tabsMid
~~~ python
print("error:", xerus.frob_norm(x(i&0) - ttx(i&0)))
print("error:", xerus.frob_norm(x - xerus.Tensor(ttx)))
~~~
__tabsEnd
......
......@@ -13,43 +13,49 @@ section: "Documentation"
You can get the source code of the `xerus` library via [git](https://git.hemio.de/xerus/xerus/tree/master) or as an [archive](https://git.hemio.de/xerus/xerus/repository/archive.tar.gz?ref=master).
For example to clone the repository with the latest stable version under linux simply type
~~~
~~~ bash
git clone https://git.hemio.de/xerus/xerus.git
~~~
## Dependencies and Configuration
`Xerus` depends on several well established libraries, usually pre-installed or available through the standard package managers. In particular `lapacke`, `CXSparse`, `binutils`
and their dependencies. Also note that at at least version 4.8 of the `GCC` is required, as this is the first version to offer support for all `C++11` functionality used in `xerus`.
`Xerus` depends on several well established libraries, usually pre-installed or available through the standard package managers. In particular `lapacke`, `CXSparse`, `binutils`, `boost`
and their dependencies. Also note that at least version 4.8 of the `GCC` is required, as this is the first version to offer support for all `C++11` functionality used in `xerus`.
Make sure that all these are installed on your system before proceeding.
E.g. to install all dependencies on a fedora system execute
~~~ bash
dnf install gcc-c++ openblas-devel suitesparse-devel lapack-devel boost-devel binutils-devel
~~~
dnf install gcc-c++ openblas-devel suitesparse-devel lapack-devel binutils-devel
To build the python bindings you will furthermore need the python development headers, `numpy` as well as `boost-python` or
`boost-python3` depending on the python version you wish to use. E.g. for a fedora system and if you want to use python 2 simply execute
~~~ bash
dnf install python2-numpy python-devel
~~~
After downloading the source it is necessary to set a number of options that are somewhat individual to your system and needs. All following build steps assume that these
options are set in the `config.mk` file. Reasonable default values can be found in the `config.mk.default` file.
In particular the optimization level is interesting and the paths or standard flags for the required libraries need to be set. This way the `xerus` library can be compiled with any
`blas` compatible library for the matrix operations. For more details see the description in the `config.mk.default`
~~~
`blas` compatible library for the matrix operations. For more details see the description in the `config.mk.default` file
~~~ bash
cp config.mk.default config.mk
nano config.mk
~~~
## Making Sure Everything Works as Intended
## Ensure Everything Works as Intended
The sources include a number of unit tests that ensure that everything works as intended. To perform them simply input
~~~
~~~ bash
make test -j4
~~~
(where `-j4` allows make to use up to 4 threads for the compilation). If all options were set correctly in the `config.mk` it should compile a test executable and launch it.
The output of this executable should then list a number of passed tests and end with
The output of this executable should then list a number of passed tests and end with something similar to
~~~
|
| total summary 132 of 132 passed
| total summary 149 of 149 passed
-------------------------------------------------------------------------------
|
| Total time elapsed: 10848.406 ms
| Total time elapsed: 5594.088 ms
-------------------------------------------------------------------------------
~~~
Note in particular, that all tests were passed. Should this not be the case please file a bug report with as many details as you
......@@ -58,29 +64,34 @@ can in our [issuetracker](https://git.hemio.de/xerus/xerus/issues) or write us a
## Building the Library
If all tests were passed you can build the library simply by calling make
~~~
make all -j4
~~~
this creates the shared library object `libxerus.so` as well as the static library object `libxerus.a`.
If all tests were passed you can build the library simply by calling `make shared` or `make static` depending on whether you want
to build shared or static library objects. To build all shared objects as well as the python bindings use `make python`. After
the compilation you should have a `build/` directory with a `libxerus_misc.a/.so` (suffix depending on build type), a `libxerus.a/.so`
and the `xerus.so` for python in case you also built the python bindings.
If you want to install `xerus` on your system to the path given in `config.mk` simply call (as root if necessary)
~~~
make install
~~~
You can use these objects directly by setting your environment variables accordingly or you can install the library to a path
already included in these variables. To do so simply set the path in `config.mk` and call (as root if necessary) `make install`.
## Compiling your own Applications Using Xerus
If `xerus` is properly installed on your system, compiling your own applications using `xerus` is as simple as using any other library. Just include `-lxerus` in your linker call and make sure to use
`-std=c++11` or `-std=c++14` in all compilation units that include `xerus.h` and everything should work.
`-std=c++11` or `-std=c++14` in all compilation units that include `xerus.h` and everything should work fine.
If you want to use the static version of `xerus` you also have to include all libraries `xerus` depends on in your linker call. In particular these are lapacke (`-llapacke`),
lapack (`-llapack`), blas (`-lblas` or `-lopenblas` or similar), suitesparse (`-lumfpack -lspqr`), binutils (`-lbfd`). On some old system one has to manually include the dependencys of binutils (`-liberty -lz -ldl`).
lapack (`-llapack`), blas (`-lblas` or `-lopenblas` or similar), suitesparse (`-lcholmod -lspqr`) and binutils (`-lbfd`). On some old systems one has to manually include the dependencies of binutils (`-liberty -lz -ldl`).
You can test this by trying to compile the tutorial file (in this example without the installed `xerus` library)
You can test this by trying to compile one of the examples (here without the installed `xerus` library)
~~~ bash
g++ -std=c++11 doc/_include/examples/qtt.cpp build/libxerus.a build/libxerus_misc.a -llapacke -llapack -lopenblas -lgfortran -lcholmod -lspqr -lbfd -liberty -lz -ldl
~~~
g++ -std=c++11 tutorials/fulltensor.cpp libxerus.a -llapacke -llapack -lcblas -lblas -lcxsparse -lbfd -liberty -lz -ldl
or with the shared objects and correctly set environment variables
~~~ bash
g++ -std=c++11 doc/_include/examples/qtt.cpp -lxerus
~~~
Within python you should be able to simply use
~~~ python
import xerus
~~~
if everything is set up correctly.
---
layout: post
title: "Xerus"
subtitle: "a general purpose tensor library"
---
# xerus - a general purpose tensor library {#mainpage}
# Xerus
## Introduction
......@@ -10,7 +12,8 @@ The `xerus` library is a general purpose library for numerical calculations with
The focus of development was the simple usability and adaptability to any setting that requires higher order tensors or decompositions thereof.
The key features include:
* Modern code and concepts incorporating many features of the new `C++11` standard.
* Modern code and concepts incorporating many features of the `C++11` standard.
* Full python bindings with very similar syntax for easy transitions from and to c++.
* Calculation with tensors of arbitrary orders using an intuitive Einstein-like notation `A(i,j) = B(i,k,l) * C(k,j,l);`.
* Full implementation of the Tensor-Train decompositions (MPS) with all necessary capabilities (including algorithms like ALS, ADF and CG).
* Lazy evaluation of multiple tensor contractions featuring heuristics to find the most effective contraction order.
......@@ -18,13 +21,11 @@ The key features include:
* Fast sparse tensor calculation by usage of the `suiteSparse` sparse matrix capabilities.
* Capabilities to handle arbitrary Tensor Networks.
## Version History
We released our first stable version 1.0 in May 2015 and are currently at version 2.2.1. It can be obtained via [git](https://git.hemio.de/xerus/xerus) or as an archived download via the same link.
The current development version is also available in the same git repository (branch 'development') and it might include more features than the latest stable release, but be advised that these development versions are particularly poorly documented and might change drastically from one day to the next.
## Getting Started
There are a number of tutorials to get you started using the `xerus` library.
* [Building xerus](@ref md_building_xerus) - instructions on how to build the library itself and your first own program using it.
* [Quick-Start guide](_quick-_start-example.html) - a short introduction into the basic `xerus` functionality.
......
......@@ -324,6 +324,14 @@ BOOST_PYTHON_MODULE(xerus) {
}, return_value_policy<manage_new_object, // result is treated as a new object
with_custodian_and_ward_postcall<0,1>>()); // but the argument will not be destroyed before the result is destroyed
enum_<Tensor::Representation>("Representation", "Possible representations of Tensor objects.")
.value("Dense", Tensor::Representation::Dense)
.value("Sparse", Tensor::Representation::Sparse)
;
enum_<Tensor::Initialisation>("Initialisation", "Possible initialisations of new Tensor objects.")
.value("Zero", Tensor::Initialisation::Zero)
.value("None", Tensor::Initialisation::None)
;
// ----------------------------------------------------------- Tensor
{ scope Tensor_scope =
......@@ -533,15 +541,7 @@ BOOST_PYTHON_MODULE(xerus) {
_this[_i] = _val;
})
;
enum_<Tensor::Representation>("Representation", "Possible representations of Tensor objects.")
.value("Dense", Tensor::Representation::Dense)
.value("Sparse", Tensor::Representation::Sparse)
// .export_values() // would define Tensor.Sparse = Tensor.Representation.Sparse etc.
;
enum_<Tensor::Initialisation>("Initialisation", "Possible initialisations of new Tensor objects.")
.value("Zero", Tensor::Initialisation::Zero)
.value("None", Tensor::Initialisation::None)
;
} // close Tensor_scope
variable_argument_member_to_tuple_wrapper("Tensor.__call__", "TensorCallOperator");
//def("frob_norm", +[](const Tensor& _x){ return _x.frob_norm(); }); // see below
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment