Commit 2ba4c02f authored by Sebastian Wolf

Merge branch 'master' into development

parents d82eb788 79d1a033
......@@ -10,7 +10,7 @@ stages:
job_build_homepage:
  stage: build_homepage
  script: "make -C doc doc; scp -r doc/html xerusweb:libxerus.org-443"
  script: "cp .config.mk.ci.gcc config.mk; make -C doc doc; scp -r doc/html xerusweb:libxerus.org-443"
  when: always
  only:
    - master
......
......@@ -2,6 +2,11 @@
Potentially breaking changes are marked with an exclamation point '!' at the beginning of their description.
* 2017-05-31 v3.0.1
* Added TTNetwork::use_dense_representations() to convert all components to dense representation.
* Exporting Tensor::dense_copy() and Tensor::sparse_copy() to python.
* More examples on the homepage and checks for code-coverage.
* 2017-05-29 v3.0.0
* Python wrapper now stable.
* ! REQUIRE macro now logs as error instead of fatal error.
......
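The two conversion helpers named in the v3.0.1 entry can be exercised as in the following minimal sketch (the dimensions and tensors are invented for illustration):

~~~ cpp
#include <xerus.h>

int main() {
	// dense_copy()/sparse_copy() return a copy in the requested representation
	// (dirac tensors are created in sparse representation)
	const auto d = xerus::Tensor::dirac({2, 2, 2}, 0);
	const xerus::Tensor dd = d.dense_copy();

	// use_dense_representations() converts all TT components in place
	auto x = xerus::TTTensor::dirac(std::vector<size_t>(4, 2), 0);
	x.use_dense_representations();
}
~~~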
......@@ -224,7 +224,7 @@ fullTest: $(TUTORIALS) $(TEST_NAME)
.FORCE:
doc: .FORCE
doc: .FORCE doc/parseDoxytags doc/findDoxytag
	make -C doc doc
......@@ -232,14 +232,13 @@ clean:
	rm -fr build
	-rm -f $(TEST_NAME)
	-rm -f include/xerus.h.gch
	-rm -r doc/html
	make -C doc clean
benchmark: $(MINIMAL_DEPS) $(LOCAL_HEADERS) benchmark.cxx $(LIB_NAME_STATIC)
	$(CXX) $(FLAGS) benchmark.cxx $(LIB_NAME_STATIC) $(SUITESPARSE) $(LAPACK_LIBRARIES) $(BLAS_LIBRARIES) $(CALLSTACK_LIBS) -lboost_filesystem -lboost_system -o Benchmark
# Build rule for normal misc objects
build/.miscObjects/%.o: %.cpp $(MINIMAL_DEPS)
	mkdir -p $(dir $@)
......
......@@ -3,7 +3,7 @@
The `xerus` library is a general purpose library for numerical calculations with higher order tensors, Tensor-Train Decompositions / Matrix Product States and other Tensor Networks.
The focus of development is on simple usability and adaptability to any setting that requires higher order tensors or decompositions thereof.
For tutorials and a documentation see <a href="http://libxerus.org">the doxygen documentation</a>.
For tutorials and a documentation see <a href="http://libxerus.org">the documentation</a>.
The source code is licensed under the AGPL v3.0. For more details see the LICENSE file.
......@@ -20,6 +20,7 @@ The source code is licenced under the AGPL v3.0. For more details see the LICENS
Copy the default configuration and modify it for your needs
> cp config.mk.default config.mk
> nano config.mk
Test whether everything works correctly with
......@@ -27,9 +28,10 @@ Test whether everything works correctly with
build (and optionally install) the library with
> make all -j4
> sudo make install
and you should be ready to use the library. For more details see <a href="https://www.libxerus.org/md_building_xerus.html">the "Building Xerus" page in the documentation</a>.
and you should be ready to use the library. For more details see <a href="https://www.libxerus.org/building_xerus/">the "Building Xerus" page in the documentation</a>.
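As a quick smoke test that linking works (a minimal sketch, not part of the original instructions; the compile command assumes a default install and may need extra flags on your system):

~~~ cpp
// smoke_test.cpp -- e.g. g++ -std=c++11 smoke_test.cpp -lxerus -o smoke_test
#include <xerus.h>
#include <iostream>

int main() {
	const xerus::Index i, j;
	auto A = xerus::Tensor::identity({4, 4});
	auto b = xerus::Tensor::ones({4});
	xerus::Tensor x;
	x(i) = A(i, j) * b(j);  // contract A with b
	std::cout << xerus::frob_norm(x) << std::endl;  // prints 2
}
~~~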
# Issues #
......
......@@ -8,15 +8,34 @@ help:
	\t\tserve \t\t -- Build the html documentation for the xerus library and offer it via 'jekyll serve'.\n \
	\t\tclean \t\t -- Remove all documentation files.\n"
doc:
.FORCE:
doc: .FORCE parseDoxytags findDoxytag
	-mkdir html
	doxygen doxygen/Doxyfile
	./parseDoxytags
	jekyll build --source jekyll/ --destination html/
clean:
	-rm -r html
	-rm -rf html
	-rm -f parseDoxytags findDoxytag
	-rm -f xerus.tags xerus.tagfile
serve:
serve: .FORCE parseDoxytags findDoxytag
	-mkdir html
	doxygen doxygen/Doxyfile
	./parseDoxytags
	jekyll serve --source jekyll/ --destination html/
include ../config.mk
include ../makeIncludes/general.mk
include ../makeIncludes/warnings.mk
include ../makeIncludes/optimization.mk
FLAGS = $(strip $(WARNINGS) $(OPTIMIZE) $(OTHER))
parseDoxytags: ../src/docHelper/parseDoxytags.cpp
	$(CXX) $(FLAGS) ../src/docHelper/parseDoxytags.cpp -o parseDoxytags
findDoxytag: ../src/docHelper/findDoxytag.cpp
	$(CXX) $(FLAGS) ../src/docHelper/findDoxytag.cpp -o findDoxytag
......@@ -740,7 +740,7 @@ WARN_IF_DOC_ERROR = YES
# parameter documentation, but not about the absence of documentation.
# The default value is: NO.
WARN_NO_PARAMDOC = YES
WARN_NO_PARAMDOC = NO
# The WARN_FORMAT tag determines the format of the warning messages that doxygen
# can produce. The string should contain the $file, $line, and $text tags, which
......@@ -2050,7 +2050,7 @@ TAGFILES =
# tag file that is based on the input files it reads. See section "Linking to
# external documentation" for more information about the usage of tag files.
GENERATE_TAGFILE =
GENERATE_TAGFILE = xerus.tagfile
# If the ALLEXTERNALS tag is set to YES, all external class will be listed in
# the class index. If set to NO, only the inherited external classes will be
......
#include <xerus.h>
#include <iostream>
#include <string>
#include <fstream>
using namespace xerus;
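// Protein cascade example: each site (protein) can hold 0..MAX_NUM_PER_SITE-1
// particles; the joint distribution over all sites is stored as a TT tensor.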
const size_t MAX_NUM_PER_SITE = 32;
Tensor create_M() {
	Tensor M = -1*Tensor::identity({MAX_NUM_PER_SITE, MAX_NUM_PER_SITE});
	for (size_t i = 0; i < MAX_NUM_PER_SITE-1; ++i) {
		M[{i+1, i}] = 1.0;
	}
	return M;
}
Tensor create_L() {
	Tensor L({MAX_NUM_PER_SITE, MAX_NUM_PER_SITE});
	L.modify_diagonal_entries([](value_t& _x, const size_t _i) {
		_x = double(_i)/double(_i+5);
	});
	return L;
}
Tensor create_S() {
	Tensor S({MAX_NUM_PER_SITE, MAX_NUM_PER_SITE});
	// Set diagonal
	for (size_t i = 0; i < MAX_NUM_PER_SITE; ++i) {
		S[{i, i}] = -double(i);
	}
	// Set offdiagonal
	for (size_t i = 0; i < MAX_NUM_PER_SITE-1; ++i) {
		S[{i, i+1}] = double(i+1);
	}
	return 0.07*S;
}
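// Assembles the generator as a TT operator (MPO) with all ranks equal to 3:
// the middle components form the lower triangular transfer block matrix
//   [ I  0  0 ]
//   [ M  0  0 ]
//   [ S  L  I ]
// while the first component is the block row [Sstar, L, I] and the last one
// the block column [I, M, S]^T, set up via the dirac tensors below.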
TTOperator create_operator(const size_t _degree) {
	const Index i, j, k, l;
	// Create matrices
	const Tensor M = create_M();
	const Tensor S = create_S();
	const Tensor L = create_L();
	const Tensor Sstar = 0.7*M+S;
	const Tensor I = Tensor::identity({MAX_NUM_PER_SITE, MAX_NUM_PER_SITE});
	// Create empty TTOperator
	TTOperator A(2*_degree);
	Tensor comp;
	// Create first component
	comp(i, j, k, l) =
		Sstar(j, k)*Tensor::dirac({1, 3}, 0)(i, l)
		+ L(j, k)*Tensor::dirac({1, 3}, 1)(i, l)
		+ I(j, k)*Tensor::dirac({1, 3}, 2)(i, l);
	A.set_component(0, comp);
	// Create middle components
	comp(i, j, k, l) =
		I(j, k) * Tensor::dirac({3, 3}, {0, 0})(i, l)
		+ M(j, k) * Tensor::dirac({3, 3}, {1, 0})(i, l)
		+ S(j, k) * Tensor::dirac({3, 3}, {2, 0})(i, l)
		+ L(j, k) * Tensor::dirac({3, 3}, {2, 1})(i, l)
		+ I(j, k) * Tensor::dirac({3, 3}, {2, 2})(i, l);
	for(size_t c = 1; c+1 < _degree; ++c) {
		A.set_component(c, comp);
	}
	// Create last component
	comp(i, j, k, l) =
		I(j, k)*Tensor::dirac({3, 1}, 0)(i, l)
		+ M(j, k)*Tensor::dirac({3, 1}, 1)(i, l)
		+ S(j, k)*Tensor::dirac({3, 1}, 2)(i, l);
	A.set_component(_degree-1, comp);
	return A;
}
/// @brief calculates the one-norm of a TTTensor, assuming that all entries in it are positive
double one_norm(const TTTensor &_x) {
	Index j;
	return double(_x(j&0) * TTTensor::ones(_x.dimensions)(j&0));
}
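// One implicit Euler step for dx/dt = A x solves (I - h*A) x_{k+1} = x_k.
// Here every step is solved only approximately by two ALS half-sweeps,
// warm-started with the previous iterate, and the result is rescaled to
// unit one-norm so that it remains a probability distribution.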
std::vector<TTTensor> implicit_euler(const TTOperator& _A, TTTensor _x,
		const double _stepSize, const size_t _n)
{
	const TTOperator op = TTOperator::identity(_A.dimensions)-_stepSize*_A;
	Index j,k;
	auto ourALS = ALS_SPD;
	ourALS.convergenceEpsilon = 0;
	ourALS.numHalfSweeps = 2;
	std::vector<TTTensor> results;
	TTTensor nextX = _x;
	results.push_back(_x);
	for(size_t i = 0; i < _n; ++i) {
		ourALS(op, nextX, _x);
		// Normalize
		double norm = one_norm(nextX);
		nextX /= norm;
		XERUS_LOG(iter, "Done itr " << i
			<< " residual: " << frob_norm(op(j/2,k/2)*nextX(k&0) - _x(j&0))
			<< " norm: " << norm);
		_x = nextX;
		results.push_back(_x);
	}
	return results;
}
double get_mean_concentration(const TTTensor& _res, const size_t _i) {
	const Index k,l;
	TensorNetwork result(_res);
	const Tensor weights({MAX_NUM_PER_SITE}, [](const size_t _k){
		return double(_k);
	});
	const Tensor ones = Tensor::ones({MAX_NUM_PER_SITE});
	for (size_t j = 0; j < _res.degree(); ++j) {
		if (j == _i) {
			result(l&0) = result(k, l&1) * weights(k);
		} else {
			result(l&0) = result(k, l&1) * ones(k);
		}
	}
	// at this point the degree of 'result' is 0, so there is only one entry
	return result[{}];
}
void print_mean_concentrations_to_file(const std::vector<TTTensor> &_result) {
	std::fstream out("mean.dat", std::fstream::out);
	for (const auto& res : _result) {
		for (size_t k = 0; k < res.degree(); ++k) {
			out << get_mean_concentration(res, k) << ' ';
		}
		out << std::endl;
	}
}
int main() {
	const size_t numProteins = 10;
	const size_t numSteps = 300;
	const double stepSize = 1.0;
	const size_t rankX = 3;
	auto start = TTTensor::dirac(
		std::vector<size_t>(numProteins, MAX_NUM_PER_SITE),
		0
	);
	start.use_dense_representations();
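	// ALS keeps the ranks of its iterate fixed, so the rank-1 dirac start is
	// perturbed by a tiny random tensor to reach the target rank rankX.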
	start += 1e-14 * TTTensor::random(
		start.dimensions,
		std::vector<size_t>(start.degree()-1, rankX-1)
	);
	const auto A = create_operator(numProteins);
	const auto results = implicit_euler(A, start, stepSize, numSteps);
	print_mean_concentrations_to_file(results);
}
#!/usr/bin/gnuplot
set term pngcairo size 600, 400
unset key
set grid
set output "cascade.png"
set xlabel "timestep"
set ylabel "mean concentration"
plot 'mean.dat' u 1 w l, \
	'mean.dat' u 2 w l, \
	'mean.dat' u 3 w l, \
	'mean.dat' u 4 w l, \
	'mean.dat' u 5 w l, \
	'mean.dat' u 6 w l, \
	'mean.dat' u 7 w l, \
	'mean.dat' u 8 w l, \
	'mean.dat' u 9 w l, \
	'mean.dat' u 10 w l
import xerus as xe
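# Python version of the cascade example above (python 2 bindings, cf. xrange).
# Each site can hold 0..MAX_NUM_PER_SITE-1 particles; the joint distribution
# over all sites is represented as a TT tensor.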
MAX_NUM_PER_SITE = 32
def create_M():
	M = -1 * xe.Tensor.identity([MAX_NUM_PER_SITE, MAX_NUM_PER_SITE])
	for i in xrange(MAX_NUM_PER_SITE-1) :
		M[[i+1, i]] = 1.0
	return M
def create_L():
	L = xe.Tensor([MAX_NUM_PER_SITE, MAX_NUM_PER_SITE])
	for i in xrange(MAX_NUM_PER_SITE) :
		L[[i,i]] = i / (i+5.0)
	return L
def create_S():
	S = xe.Tensor([MAX_NUM_PER_SITE, MAX_NUM_PER_SITE])
	# set diagonal
	for i in xrange(MAX_NUM_PER_SITE) :
		S[[i,i]] = -i
	# set offdiagonal
	for i in xrange(MAX_NUM_PER_SITE-1) :
		S[[i,i+1]] = i+1
	return 0.07*S
def create_operator(degree):
	i,j,k,l = xe.indices(4)
	# create matrices
	M = create_M()
	S = create_S()
	L = create_L()
	Sstar = 0.7*M + S
	I = xe.Tensor.identity([MAX_NUM_PER_SITE, MAX_NUM_PER_SITE])
	# create empty TTOperator
	A = xe.TTOperator(2*degree)
	# create first component
	comp = xe.Tensor()
	comp(i, j, k, l) << \
		Sstar(j, k) * xe.Tensor.dirac([1, 3], 0)(i, l) \
		+ L(j, k) * xe.Tensor.dirac([1, 3], 1)(i, l) \
		+ I(j, k) * xe.Tensor.dirac([1, 3], 2)(i, l)
	A.set_component(0, comp)
	# create middle components
	comp(i, j, k, l) << \
		I(j, k) * xe.Tensor.dirac([3, 3], [0, 0])(i, l) \
		+ M(j, k) * xe.Tensor.dirac([3, 3], [1, 0])(i, l) \
		+ S(j, k) * xe.Tensor.dirac([3, 3], [2, 0])(i, l) \
		+ L(j, k) * xe.Tensor.dirac([3, 3], [2, 1])(i, l) \
		+ I(j, k) * xe.Tensor.dirac([3, 3], [2, 2])(i, l)
	for c in xrange(1, degree-1) :
		A.set_component(c, comp)
	# create last component
	comp(i, j, k, l) << \
		I(j, k) * xe.Tensor.dirac([3, 1], 0)(i, l) \
		+ M(j, k) * xe.Tensor.dirac([3, 1], 1)(i, l) \
		+ S(j, k) * xe.Tensor.dirac([3, 1], 2)(i, l)
	A.set_component(degree-1, comp)
	return A
def one_norm(x):
	i = xe.Index()
	return float(x(i&0) * xe.TTTensor.ones(x.dimensions)(i&0))
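# One implicit Euler step for dx/dt = A x solves (I - stepSize*A) x_new = x,
# here approximately via ALS; the iterate is then rescaled to unit one-norm.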
def implicit_euler(A, x, stepSize, n):
	op = xe.TTOperator.identity(A.dimensions) - stepSize*A
	j,k = xe.indices(2)
	ourALS = xe.ALS_SPD
	ourALS.convergenceEpsilon = 1e-4
	ourALS.numHalfSweeps = 100
	results = [x]
	nextX = xe.TTTensor(x)
	for i in xrange(n) :
		ourALS(op, nextX, x)
		# normalize
		norm = one_norm(nextX)
		nextX /= norm
		print("done itr", i, \
			"residual:", xe.frob_norm(op(j/2,k/2)*nextX(k&0) - x(j&0)), \
			"one-norm:", norm)
		x = xe.TTTensor(nextX) # ensure it is a copy
		results.append(x)
	return results
def get_mean_concentration(x, i):
	k,l = xe.indices(2)
	result = xe.TensorNetwork(x)
	weights = xe.Tensor.from_function([MAX_NUM_PER_SITE], lambda idx: idx[0])
	ones = xe.Tensor.ones([MAX_NUM_PER_SITE])
	for j in xrange(x.degree()) :
		if j == i :
			result(l&0) << result(k, l&1) * weights(k)
		else :
			result(l&0) << result(k, l&1) * ones(k)
	# at this point the degree of 'result' is 0, so there is only one entry
	return result[[]]
def print_mean_concentrations_to_file(results):
	f = open("mean.dat", 'w')
	for res in results :
		for k in xrange(res.degree()) :
			f.write(str(get_mean_concentration(res, k))+' ')
		f.write('\n')
	f.close()
numProteins = 10
numSteps = 200
stepSize = 1.0
rankX = 3
A = create_operator(numProteins)
start = xe.TTTensor.dirac([MAX_NUM_PER_SITE]*numProteins, 0)
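# convert all components to dense representation
# (mirrors use_dense_representations() in the C++ version above)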
for i in xrange(numProteins) :
	start.set_component(i, start.get_component(i).dense_copy())
start += 1e-14 * xe.TTTensor.random(start.dimensions, [rankX-1]*(start.degree()-1))
results = implicit_euler(A, start, stepSize, numSteps)
print_mean_concentrations_to_file(results)
......@@ -25,10 +25,8 @@ int main() {
std::cout << "ttA ranks: " << ttA.ranks() << std::endl;
// the right hand side of the equation both as Tensor and in (Q)TT format
auto b = xerus::Tensor::ones({512});
b.reinterpret_dimensions(std::vector<size_t>(9, 2));
xerus::TTTensor ttb(b);
auto b = xerus::Tensor::ones(std::vector<size_t>(9, 2));
auto ttb = xerus::TTTensor::ones(b.dimensions);
// construct a random initial guess of rank 3 for the ALS algorithm
xerus::TTTensor ttx = xerus::TTTensor::random(std::vector<size_t>(9, 2), std::vector<size_t>(8, 3));
......
......@@ -23,9 +23,8 @@ ttA = xe.TTOperator(A)
print("ttA ranks:", ttA.ranks())
# the right hand side of the equation both as Tensor and in (Q)TT format
b = xe.Tensor.ones([512])
b.reinterpret_dimensions([2,]*9)
ttb = xe.TTTensor(b)
b = xe.Tensor.ones([2,]*9)
ttb = xe.TTTensor.ones(b.dimensions)
# construct a random initial guess of rank 3 for the ALS algorithm
ttx = xe.TTTensor.random([2,]*9, [3,]*8)
......
......@@ -3,6 +3,7 @@
<head>
	<title>Xerus Documentation{% if page.title %} - {{ page.title }} {% endif %}</title>
	<link rel="icon" href="{{ site.baseurl }}/images/xerus.png">
	<meta charset="UTF-8">
	<meta name="viewport" content="width=device-width, initial-scale=1.0">
	<script src="{{ site.baseurl }}/js/jquery.min.js"></script>
......
......@@ -20,9 +20,12 @@ class TabsConverter < Converter
			.gsub('<p>__dangerEnd</p>', "</div>")
			.gsub('<p>__warnStart</p>', "<div class=\"alert alert-warning\">")
			.gsub('<p>__warnEnd</p>', "</div>")
			.gsub('<p>__infoStart</p>', "<div class=\"alert alert-info\">")
			.gsub('<p>__infoEnd</p>', "</div>")
			.gsub('__breakFix1</a></p>', "")
			.gsub('<p>__breakFix2', "</a>")
			.gsub('__version', %x( git describe --tags --always --abbrev=0 ) )
			.gsub('__version', %x( cat ../VERSION ) )
			.gsub(/__doxyref\(([^\)]+)\)/){ |m| %x( ./findDoxytag #{$1} ) }
		end
	end
end
......
---
layout: post
title: "The ALS Algorithm"
date: 1000-12-10
date: 1000-11-10
topic: "Examples"
section: "Examples"
---
......
......@@ -13,7 +13,7 @@ demonstrate the general layout and is enough for very basic applications. It is
at the more detailed guides for all classes one wishes to use though - or even have a look at the doxygen class documentation for details on all functions.
It is assumed that you have already obtained and compiled the library itself and know how to link against it.
If this is not the case, please refer to the [building xerus](building_xerus) page.
If this is not the case, please refer to the [building xerus](/building_xerus) page.
In the following we will solve a FEM equation arising from the heat equation using a QTT decomposition and the ALS algorithm.
......@@ -90,22 +90,18 @@ print("ttA ranks:", ttA.ranks())
~~~
__tabsEnd
For the right-hand-side we perform similar operations to obtain a QTT decomposed vector $ b_i = 1 \forall i $.
As the generating function needs no index information, we create a `[]()->double` lambda function:
For the right-hand side we will take a simple tensor that is equal to 1 at every position. As this is a commonly used tensor,
we can simply use the named constructor provided by `xerus`.
__tabsStart
~~~ cpp
auto b = xerus::Tensor::ones({512});
b.reinterpret_dimensions(std::vector<size_t>(9, 2));
xerus::TTTensor ttb(b);
auto b = xerus::Tensor::ones(std::vector<size_t>(9, 2));
auto ttb = xerus::TTTensor::ones(b.dimensions);
~~~
__tabsMid
~~~ python
b = xerus.Tensor.ones([512])
b.reinterpret_dimensions([2,]*9)
ttb = xerus.TTTensor(b)
b = xerus.Tensor.ones([2,]*9)
ttb = xerus.TTTensor.ones(b.dimensions)
~~~
__tabsEnd
......
......@@ -12,7 +12,7 @@ In matrix calculus the decomposition of a matrix into the matrix product of seve
into an orthogonal and a triangular matrix (QR), or into orthogonal matrices and a diagonal matrix of singular values (SVD)) is among the
most powerful tools to devise numerical algorithms. In the case of tensors of higher degree it is necessary to indicate along
which modes the decomposition is supposed to happen, so `xerus` uses the notation of indexed equations explained in the previous
chapter ([Indices and Equations](indices)).
chapter ([Indices and Equations](/indices)).
## QR Decompositions
To provide an intuitive approach to decompositions, `xerus` uses the assignment of multiple tensors with a single operator to
......
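As a minimal sketch of the multi-assignment syntax alluded to above (the tensor `A` and its dimensions are invented for illustration):

~~~ cpp
xerus::Tensor A = xerus::Tensor::random({4, 5});
xerus::Tensor Q, R;
const xerus::Index i, j, r;

// one assignment yields both factors of A = Q*R, contracted over r
(Q(i, r), R(r, j)) = xerus::QR(A(i, j));
~~~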
......@@ -57,12 +57,12 @@ left hand side.
The left hand side (`c` in the above example) is not required to have the right degree or dimensions. The type of `c` on the
other hand does change the meaning of the equation and so has to be set correctly. E.g. if `c` is a `xerus::TensorNetwork`, no
contraction is performed as `A(i,j)*b(j)` is in itself a valid tensor network. See also the tutorials on [TT-Tensors](tttensors)
and [Tensor Networks](tensornetworks) for details on the respective assignments.
contraction is performed as `A(i,j)*b(j)` is in itself a valid tensor network. See also the tutorials on [TT-Tensors](/tttensors)
and [Tensor Networks](/tensornetworks) for details on the respective assignments.
__warnStart
Unless runtime checks have explicitely been disabled during compilation of `xerus` (see [Optimizations](optimization)), invalid
Unless runtime checks have explicitly been disabled during compilation of `xerus` (see [Optimizations](/optimization)), invalid
indexed expressions will produce runtime errors (in the form of `xerus::misc::generic_error`s being thrown as exceptions).
__tabsStart
......@@ -163,7 +163,7 @@ C(i,k) << A(i&1, j) * B(j, k&1)
~~~
__tabsEnd
The division `i/n` is useful for example to write equations with high dimensional operators such as [TT-Operators](tttensors)
The division `i/n` is useful for example to write equations with high dimensional operators such as [TT-Operators](/tttensors)
for which the indices are ordered by default such that the application of the operator can be written in analogy to
matrix-vector products as:
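(The concrete formula is cut off by this hunk; in the same notation as the residual computation in the cascade example above, with illustrative names `w`, `A` and `v`, it would read:)

~~~ cpp
w(i&0) = A(i/2, j/2) * v(j&0);  // i/2: first half of A's indices, j/2: second half
~~~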
......@@ -188,7 +188,7 @@ that the degree of `v` is equal to half the degree of `A` and that its dimension
A common use for indexed expressions is to construct tensors in a blockwise fashion. In the following example we were able to
calculate the tensor `comp` whenever the first index was fixed, either by numerical construction (`A` and `B`) or by showing
mathematically, that it is then equal to a well known tensor (here the identity matrix). The full tensor can thus be constructed
with the help of the named constructors of `xerus::Tensor` (see the [Tensor tutorial](tensor)) as follows.
with the help of the named constructors of `xerus::Tensor` (see the [Tensor tutorial](/tensor)) as follows.
__tabsStart
~~~ cpp
......
......@@ -33,6 +33,8 @@ To build the python bindings you will furthermore need the python development he
dnf install python2-numpy python-devel
~~~
To build the documentation (i.e. this homepage) locally you will need `jekyll` and `doxygen`.
After downloading the source it is necessary to set a number of options that are specific to your system and needs. All following build steps assume that these
options are set in the `config.mk` file. Reasonable default values can be found in the `config.mk.default` file.
......
......@@ -46,6 +46,7 @@ input:checked + label {
	position: relative;
	width: 100%;
	z-index: 5;
	margin-bottom: .25em;
}
#content section {
......@@ -55,6 +56,15 @@ input:checked + label {
	z-index: -100;
}
.highlighter-rouge {
	overflow-x: auto;
}
.highlighter-rouge pre {
	min-width: 662px;
	tab-size: 4;
}
#content #content1 {
	position: relative;
}
......
......@@ -12,24 +12,24 @@ This Documentation is still **work in progress**. Several Sections are currently
__warnEnd
This is the semi-complete documentation of the `xerus` library. It does not provide you with precise function declarations or
class hierarchies (check out the [doxygen documentation](doxygen) for those) but instead focuses on small working code snippets
class hierarchies (check out the [doxygen documentation](/doxygen) for those) but instead focuses on small working code snippets
to demonstrate `xerus`'s capabilities.
The documentation is categorized into chapters that each focus on individual features of the library. In particular the chapters in the "Basic
Usage" section only build upon the knowledge of the previous chapters so that they can easily be read in order. They are still
useful as lookup resources later on though.
If you have not already done so, you will most likely want to start by downloading and [building xerus](building_xerus). If
you are uncertain or confused by our nomenclature at any point you can reference the [nomenclature](nomenclature) chapter which
If you have not already done so, you will most likely want to start by downloading and [building xerus](/building_xerus). If
you are uncertain or confused by our nomenclature at any point you can reference the [nomenclature](/nomenclature) chapter which
should hopefully allow you to deduce the meaning of all tensor specific terms we use.
The "Basic Usage" section starts out with the creation and modification of (sparse or dense) tensors ([The Tensor Class](tensor)),
how to use them in indexed equations to denote contractions, summations etc. ([Indices and Equations](indices)) and finally
how to denote generalizations of matrix decompositions ([Decompositions and Solve](decompositions)) before explainig any more
elaborate tensor format. The focus of `xerus` so far clearly lies on the Tensor Train decomposition ([TT-Tensors](tttensors)) and
algorithms for those to solve least squares problems ([Alternating Algorithms](als), [Riemannian Algorithms](riemannian)) or
tensor recovery and completion problems ([Tensor Completion / Recovery](completion)). It is possible to use `xerus`'s capabilities
to construct and contract arbitrary tensor networks though ([General Tensor Networks](tensornetworks)).
The "Basic Usage" section starts out with the creation and modification of (sparse or dense) tensors ([The Tensor Class](/tensor)),
how to use them in indexed equations to denote contractions, summations etc. ([Indices and Equations](/indices)) and finally
how to denote generalizations of matrix decompositions ([Decompositions and Solve](/decompositions)) before explaining any more
elaborate tensor format. The focus of `xerus` so far clearly lies on the Tensor Train decomposition ([TT-Tensors](/tttensors)) and
algorithms for those to solve least squares problems ([Alternating Algorithms](/als), [Riemannian Algorithms](/riemannian)) or
tensor recovery and completion problems ([Tensor Completion / Recovery](/completion)). It is possible to use `xerus`'s capabilities
to construct and contract arbitrary tensor networks though ([General Tensor Networks](/tensornetworks)).
In the "Advanced Usage" section you find instructions on how to optimize your usage of the `xerus` library to gain those last
10-20% of speedup ([Optimization](optimization)) and explanations of the debugging tools that `xerus` gives you ([Debugging](debugging)).
10-20% of speedup ([Optimization](/optimization)) and explanations of the debugging tools that `xerus` gives you ([Debugging](/debugging)).