Commit dec265fa authored by Ben Huber

translated first part of Tensor tutorial to python

parent ba76ff90
Pipeline #711 failed in 4 minutes and 51 seconds
......@@ -73,6 +73,7 @@ COMPILE_THREADS = 8 # Number of threads to use during link time optimizatio
# DEBUG += -fsanitize=undefined # GCC only
# DEBUG += -fsanitize=memory # Clang only
# DEBUG += -fsanitize=address # find out of bounds access
# DEBUG += -pg # adds profiling code for the 'gprof' analyzer
# Xerus has a built-in logging system to provide runtime information. Here you can adjust the logging level used by the library.
......
......@@ -9,7 +9,12 @@ tensors.
## Creation of Tensors
The most basic tensors can be created with the empty constructor
~~~.cpp
A = xerus::Tensor()
// creates a degree 0 tensor
A = xerus::Tensor();
~~~
~~~.py
# creates a degree 0 tensor
A = xerus.Tensor()
~~~
It is of degree 0 and represents the single number 0. Similarly, the constructors that take either the degree or a vector of
dimensions as input create (sparse) tensors that are equal to 0 everywhere
......@@ -19,6 +24,13 @@ B = xerus::Tensor(3);
// creates a sparse 2x2x2 tensor without any entries
C = xerus::Tensor({2,2,2});
~~~
~~~.py
# creates a 1x1x1 tensor with entry 0
B = xerus.Tensor(3)
# creates a sparse 2x2x2 tensor without any entries
C = xerus.Tensor([2,2,2])
# equivalently: xerus.Tensor(dim=[2,2,2])
~~~
The latter of these can be forced to create a dense tensor instead, which can either be initialized to 0 or left uninitialized
~~~.cpp
// creates a dense 2x2x2 tensor with all entries set to 0
......@@ -26,6 +38,12 @@ D = xerus::Tensor({2,2,2}, xerus::Tensor::Representation::Dense);
// creates a dense 2x2x2 tensor with uninitialized entries
E = xerus::Tensor({2,2,2}, xerus::Tensor::Representation::Dense, xerus::Tensor::Initialisation::None);
~~~
~~~.py
# creates a dense 2x2x2 tensor with all entries set to 0
D = xerus.Tensor(dim=[2,2,2], repr=xerus.Tensor.Representation.Dense)
# creates a dense 2x2x2 tensor with uninitialized entries
E = xerus.Tensor(dim=[2,2,2], repr=xerus.Tensor.Representation.Dense, init=xerus.Tensor.Initialisation.None)
~~~
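The chosen representation can be queried at runtime. A minimal check, assuming the `is_dense` and `is_sparse` queries are exposed to Python as they are in C++:
~~~.py
# C from above is sparse, D and E are dense
print(C.is_sparse())  # expected output: True
print(D.is_dense())   # expected output: True
~~~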
Other commonly used tensors (apart from the 0 tensor) are available through named constructors:
~~~.cpp
......@@ -45,6 +63,24 @@ xerus::Tensor::random_orthogonal({4,4},{4,4});
// a 4x4x4 sparse tensor with 10 random entries in uniformly distributed random positions
xerus::Tensor::random({4,4,4}, 10);
~~~
~~~.py
# a 2x3x4 tensor with all entries = 1
xerus.Tensor.ones([2,3,4])
# a (3x4) x (3x4) identity operator
xerus.Tensor.identity([3,4,3,4])
# a 3x4x3x4 tensor with superdiagonal = 1 (where all 4 indices coincide) and = 0 otherwise
xerus.Tensor.kronecker([3,4,3,4])
# a 2x2x2 tensor with a 1 in position [1,1,1] and 0 everywhere else
xerus.Tensor.dirac([2,2,2], [1,1,1])
# equivalently xerus.Tensor.dirac(dim=[2,2,2], pos=[1,1,1])
# a 4x4x4 tensor with i.i.d. Gaussian random values
xerus.Tensor.random([4,4,4])
# a (4x4) x (4x4) random orthogonal operator drawn according to the Haar measure
xerus.Tensor.random_orthogonal([4,4],[4,4])
# a 4x4x4 sparse tensor with 10 random entries in uniformly distributed random positions
xerus.Tensor.random([4,4,4], n=10)
~~~
If the entries of the tensor are to be calculated externally, it is possible in C++ to either pass the raw data directly (as
`std::unique_ptr<double>` or `std::shared_ptr<double>`; see the section 'Advanced Use and Ownership of Data' for the latter!)
......@@ -69,6 +105,11 @@ H = xerus::Tensor({16,16,16}, 16, [](size_t num, size_t max) -> std::pair<size_t
return std::pair<size_t,double>(num*17, double(num)/double(max));
});
~~~
~~~.py
# create a dense 2x2x2 tensor with every entry populated by a callback (lambda) function
G = xerus.Tensor.from_function([2,2,2], lambda idx: idx[0]*idx[1]*idx[2])
~~~
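As a quick illustration of the callback semantics (every entry of `G` is the product of its three indices):
~~~.py
# read back single entries via multi-indices
print(G[[1,1,1]])  # 1.0 = 1*1*1
print(G[[0,1,1]])  # 0.0, as one index is 0
~~~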
In Python, raw data structures are not directly compatible with those used internally by `xerus`. Tensors can be constructed from
`numpy.ndarray` objects though. This function will also implicitly accept Python's native array objects.
~~~.py
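# a hedged sketch, as the diff elides this block: constructing a tensor from
# a numpy.ndarray (assuming the from_ndarray constructor referred to above)
import numpy as np
T = xerus.Tensor.from_ndarray(np.eye(2))
# nested native Python lists are accepted implicitly as well
U = xerus.Tensor.from_ndarray([[1.0, 0.0], [0.0, 1.0]])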
......@@ -87,6 +128,12 @@ V = xerus::Tensor({2,2});
V[{0,0}] = 1.0; // equivalently: V[0] = 1.0;
V[{1,1}] = 1.0; // equivalently: V[3] = 1.0;
~~~
~~~.py
# creating an identity matrix by explicitly setting non-zero entries
V = xerus.Tensor([2,2])
V[[0,0]] = 1.0 # equivalently: V[0] = 1.0
V[[1,1]] = 1.0 # equivalently: V[3] = 1.0
~~~
## Sparse and Dense Representations
......@@ -99,11 +146,19 @@ This behaviour can be modified by changing the global setting
// tell xerus to convert sparse tensors to dense if 1 in 4 entries are non-zero
xerus::Tensor::sparsityFactor = 4;
~~~
~~~.py
# tell xerus to convert sparse tensors to dense if 1 in 4 entries are non-zero
xerus.Tensor.sparsityFactor = 4
~~~
in particular, setting the [`sparsityFactor`](\ref xerus::Tensor::sparsityFactor) to 0 will disable this feature.
~~~.cpp
// stop xerus from automatically converting sparse tensors to dense
xerus::Tensor::sparsityFactor = 0;
~~~
~~~.py
# stop xerus from automatically converting sparse tensors to dense
xerus.Tensor.sparsityFactor = 0
~~~
Note, though, that calculations with non-sparse Tensors that are stored in a sparse representation are typically much slower than
in dense representation. You should thus manually convert overly full sparse Tensors to the dense representation.
......@@ -131,6 +186,22 @@ W.use_dense_representation();
// query its sparsity. likely output: "10000 100"
std::cout << W.sparsity() << ' ' << W.count_non_zero_entries() << std::endl;
~~~
~~~.py
# create a sparse tensor with 100 random entries
W = xerus.Tensor.random(dim=[100,100], n=100)
# query its sparsity. likely output: "100 100"
print(W.sparsity(), W.count_non_zero_entries())
# store an explicit 0 value in the sparse representation
W[[0,0]] = 0.0
# query its sparsity. likely output: "101 100"
print(W.sparsity(), W.count_non_zero_entries())
# convert the tensor to dense representation
W.use_dense_representation()
# query its sparsity. likely output: "10000 100"
print(W.sparsity(), W.count_non_zero_entries())
~~~
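The reverse conversion is available as well; a sketch, assuming `use_sparse_representation` is bound to Python like its dense counterpart:
~~~.py
# convert back to the sparse representation; exact zeros are dropped again
W.use_sparse_representation()
# likely output: "100 100"
print(W.sparsity(), W.count_non_zero_entries())
~~~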
## Operators and Modifications
......
......@@ -336,7 +336,7 @@ namespace xerus {
/**
* @brief: Returns a Tensor with a single entry equals oen and all other zero.
* @brief: Returns a Tensor with a single entry equal to one and all others zero.
* @param _dimensions the dimensions of the new tensor.
* @param _position The position of the one
*/
......@@ -344,7 +344,7 @@ namespace xerus {
/**
* @brief: Returns a Tensor with a single entry equals oen and all other zero.
* @brief: Returns a Tensor with a single entry equal to one and all others zero.
* @param _dimensions the dimensions of the new tensor.
* @param _position The position of the one
*/
......
......@@ -61,7 +61,7 @@ else ifdef HIGH_OPTIMIZATION
# OPTIMIZE += -floop-unroll-and-jam # Enable unroll and jam for the ISL based loop nest optimizer.
OPTIMIZE += -fmodulo-sched # Perform swing modulo scheduling immediately before the first scheduling pass.
OPTIMIZE += -fmodulo-sched-allow-regmoves # Perform more aggressive SMS-based modulo scheduling with register moves allowed.
OPTIMIZE += -fomit-frame-pointer # Don't keep the frame pointer in a register for functions that don't need one.
# OPTIMIZE += -fomit-frame-pointer # Don't keep the frame pointer in a register for functions that don't need one.
OPTIMIZE += -fsched-pressure # Enable register pressure sensitive insn scheduling before register allocation.
OPTIMIZE += -fsched-spec-load # Allow speculative motion of some load instructions.
OPTIMIZE += -fsched2-use-superblocks # When scheduling after register allocation, use superblock scheduling.
......
......@@ -103,7 +103,7 @@ namespace xerus {
void solve() {
const double solutionsNorm = frob_norm(b);
std::vector<double> residuals(10, 1000.0);
const size_t maxIterations = 10000;
const size_t maxIterations = 1;
// Rebuild right stack
......
......@@ -526,6 +526,7 @@ namespace xerus {
}
/// Solves Ax = b for x
/// order of checks and solvers inspired by Matlab's mldivide https://de.mathworks.com/help/matlab/ref/mldivide.html
void solve(double* const _x, const double* const _A, const size_t _m, const size_t _n, const double* const _b, const size_t _nrhs) {
REQUIRE(_m <= static_cast<size_t>(std::numeric_limits<int>::max()), "Dimension too large for BLAS/Lapack");
REQUIRE(_n <= static_cast<size_t>(std::numeric_limits<int>::max()), "Dimension too large for BLAS/Lapack");
......
......@@ -330,7 +330,14 @@ BOOST_PYTHON_MODULE(xerus) {
class_<Tensor>("Tensor",
"a non-decomposed Tensor in either sparse or dense representation"
)
.def(init<const Tensor::DimensionTuple&>(args("dimensions"), "constructs a Tensor with the given dimensions"))
.def(init<Tensor::DimensionTuple, Tensor::Representation, Tensor::Initialisation>(
(
arg("dim"),
arg("repr")=Tensor::Representation::Sparse,
arg("init")=Tensor::Initialisation::Zero
),
"constructs a Tensor with the given dimensions")
)
.def(init<const TensorNetwork&>())
.def(init<const Tensor &>())
.def("from_function", +[](const Tensor::DimensionTuple& _dim, PyObject *_f){
......@@ -411,14 +418,44 @@ BOOST_PYTHON_MODULE(xerus) {
.def("random",
+[](std::vector<size_t> _dim) {
return xerus::Tensor::random(_dim);
}).staticmethod("random")
.def("ones", &Tensor::ones, args("dimensions"),
"Constructs a Tensor of given dimensions that is equal to 1 everywhere."
parametersDocstr "dimensions : list or tuple of int"
},
arg("dim"),
"Construct a tensor with i.i.d. Gaussian random entries."
parametersDocstr
"dim : list or tuple of int\n"
"n : list or tuple of int, optional\n"
" number of non-zero entries"
)
.def("random",
+[](std::vector<size_t> _dim, size_t _n) {
return xerus::Tensor::random(_dim, _n);
},
(arg("dim"), arg("n"))
).staticmethod("random")
.def("random_orthogonal",
+[](std::vector<size_t> _dimLhs, std::vector<size_t> _dimRhs) {
return xerus::Tensor::random_orthogonal(_dimLhs, _dimRhs);
}).staticmethod("random_orthogonal")
.def("ones", &Tensor::ones, args("dim"),
"Constructs a tensor of given dimensions that is equal to 1 everywhere."
parametersDocstr "dim : list or tuple of int"
).staticmethod("ones")
.def("identity", &Tensor::identity).staticmethod("identity")
.def("kronecker", &Tensor::kronecker).staticmethod("kronecker")
.def("dirac", static_cast<Tensor (*)(Tensor::DimensionTuple, const Tensor::MultiIndex&)>(&Tensor::dirac))
.def("identity", &Tensor::identity, args("dim"),
"Constructs a Tensor representation of the identity operator with the given dimensions."
parametersDocstr "dim : list or tuple of int"
).staticmethod("identity")
.def("kronecker", &Tensor::kronecker, args("dim"),
"Constructs a Tensor representation of the kronecker delta (=1 where all indices are identical, =0 otherwise)."
parametersDocstr "dim : list or tuple of int"
).staticmethod("kronecker")
.def("dirac", static_cast<Tensor (*)(Tensor::DimensionTuple, const Tensor::MultiIndex&)>(&Tensor::dirac),
(arg("dim"), arg("pos")),
"Construct a Tensor with a single entry equals one and all other zero."
parametersDocstr
"dim : list or tuple of int\n"
"pos : list or tuple of int\n"
" position of the 1 entry"
)
.def("dirac", static_cast<Tensor (*)(Tensor::DimensionTuple, const size_t)>(&Tensor::dirac)).staticmethod("dirac")
.def("has_factor", &Tensor::has_factor)
.def("is_dense", &Tensor::is_dense)
......@@ -477,6 +514,10 @@ BOOST_PYTHON_MODULE(xerus) {
.value("Sparse", Tensor::Representation::Sparse)
// .export_values() // would define Tensor.Sparse = Tensor.Representation.Sparse etc.
;
enum_<Tensor::Initialisation>("Initialisation", "Possible initialisations of new Tensor objects.")
.value("Zero", Tensor::Initialisation::Zero)
.value("None", Tensor::Initialisation::None)
;
} // close Tensor_scope
variable_argument_member_to_tuple_wrapper("Tensor.__call__", "TensorCallOperator");
def("reshuffle", static_cast<Tensor(*)(const Tensor&, const std::vector<size_t>&)>(&reshuffle));
......
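Taken together, the rewritten bindings admit calls like the following (an illustrative session that uses only the signatures visible in the diff above):
~~~.py
import xerus
# keyword-argument constructor with dim, repr and (optionally) init
E = xerus.Tensor(dim=[2,2], repr=xerus.Tensor.Representation.Dense)
# dense i.i.d. Gaussian random tensor, and the sparse variant via n
R = xerus.Tensor.random(dim=[10,10])
S = xerus.Tensor.random(dim=[10,10], n=5)
# dirac with named dim and pos arguments
D = xerus.Tensor.dirac(dim=[2,2,2], pos=[1,1,1])
~~~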