Commit 211747f3 authored by RoteKekse's avatar RoteKekse

Merge branch 'SALSA' into 'development'

Salsa

See merge request !5
parents b584435e 936f0db3
Pipeline #2164 failed with stages in 3 minutes and 44 seconds
......@@ -12,4 +12,9 @@ doc/parseDoxytags
doc/xerus.tagfile
doc/xerus.tags
config.mk
XerusTest
XerusTest*
core.*
profile
scorep*
*.bkp
*.old
......@@ -293,10 +293,10 @@ install:
endif
XerusTest_static: $(MINIMAL_DEPS) $(UNIT_TEST_OBJECTS) $(TEST_OBJECTS) build/libxerus.a build/libxerus_misc.a | libxerus_misc_dependencies libxerus_dependencies
$(CXX) -D XERUS_UNITTEST $(FLAGS) $(UNIT_TEST_OBJECTS) $(TEST_OBJECTS) build/libxerus.a build/libxerus_misc.a $(SUITESPARSE) $(LAPACK_LIBRARIES) $(ARPACK_LIBRARIES) $(BLAS_LIBRARIES) $(BOOST_LIBS) $(CALLSTACK_LIBS) -o $(TEST_NAME)
$(CXX) -D XERUS_UNITTEST $(FLAGS) $(UNIT_TEST_OBJECTS) $(TEST_OBJECTS) build/libxerus.a build/libxerus_misc.a $(SUITESPARSE) $(LAPACK_LIBRARIES) $(ARPACK_LIBRARIES) $(BLAS_LIBRARIES) $(BOOST_LIBS) $(CALLSTACK_LIBS) -o $@
XerusTest_dynamic: $(MINIMAL_DEPS) $(UNIT_TEST_OBJECTS) $(TEST_OBJECTS) build/libxerus.so build/libxerus_misc.so | libxerus_misc_dependencies libxerus_dependencies
$(CXX) -D XERUS_UNITTEST $(FLAGS) $(UNIT_TEST_OBJECTS) $(TEST_OBJECTS) build/libxerus.so build/libxerus_misc.so $(SUITESPARSE) $(LAPACK_LIBRARIES) $(ARPACK_LIBRARIES) $(BLAS_LIBRARIES) $(BOOST_LIBS) $(CALLSTACK_LIBS) -o $(TEST_NAME)
$(CXX) -D XERUS_UNITTEST $(FLAGS) $(UNIT_TEST_OBJECTS) $(TEST_OBJECTS) build/libxerus.so build/libxerus_misc.so $(SUITESPARSE) $(LAPACK_LIBRARIES) $(ARPACK_LIBRARIES) $(BLAS_LIBRARIES) $(BOOST_LIBS) $(CALLSTACK_LIBS) -o $@
build/print_boost_version: src/print_boost_version.cpp
@$(CXX) -o $@ $<
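The rule above compiles `build/print_boost_version` from a single source file whose contents are not part of this diff. As a purely hypothetical sketch (the actual file may differ), such a helper would print the Boost version the compiler sees, so the build system can check it:

```cpp
// Hypothetical sketch of src/print_boost_version.cpp (contents not shown in
// this diff): print the Boost version visible to the compiler.
#include <boost/version.hpp>
#include <iostream>

int main() {
    std::cout << BOOST_VERSION / 100000 << '.'       // major
              << BOOST_VERSION / 100 % 1000 << '.'   // minor
              << BOOST_VERSION % 100 << std::endl;   // patch
    return 0;
}
```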
......@@ -345,12 +345,13 @@ doc:
clean:
rm -fr build
-rm -f $(TEST_NAME)
-rm -f XerusTest_static
-rm -f XerusTest_dynamic
-rm -f include/xerus.h.gch
make -C doc clean
-rm xerus/libxerus_misc.so
-rm xerus/libxerus.so
-rm xerus/xerus.so
-rm -f xerus/libxerus_misc.so
-rm -f xerus/libxerus.so
-rm -f xerus/xerus.so
......
# About #
About
=====
The `xerus` library is a general-purpose library for numerical calculations with higher order tensors, Tensor-Train Decompositions / Matrix Product States and other Tensor Networks.
The focus of development was simple usability and adaptability to any setting that requires higher order tensors or decompositions thereof.
......@@ -8,7 +9,8 @@ For tutorials and a documentation see <a href="http://libxerus.org">the document
The source code is licensed under the AGPL v3.0. For more details see the LICENSE file.
# Features #
Features
========
+ Intuitive notation for expressions involving tensors of arbitrary degree: `A(i,j) = B(i,k,l) * C(k,j,l)` (see the example below);
+ Lazy evaluation of tensor expressions with automatically optimized contraction orders.
......@@ -16,11 +18,14 @@ The source code is licenced under the AGPL v3.0. For more details see the LICENS
+ Implementations of common algorithms like the ALS, (multi-site-)DMRG, ASD, AFD, CG, and some less common ones, e.g. to find the maximal entries in a Tensor Train.
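For readers new to the notation, here is a minimal sketch in C++. `xerus::Index`, `xerus::Tensor` and `Tensor::random` are part of the public xerus API; the concrete dimensions are illustrative:

```cpp
#include <xerus.h>

int main() {
    // Contract B and C over the shared indices k and l.
    xerus::Index i, j, k, l;
    xerus::Tensor B = xerus::Tensor::random({4, 5, 6});
    xerus::Tensor C = xerus::Tensor::random({5, 4, 6});
    xerus::Tensor A;
    A(i, j) = B(i, k, l) * C(k, j, l);  // A is a 4x4 tensor
    return 0;
}
```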
# Building the Xerus library #
Building the Xerus library
==========================
Building from source
--------------------
Copy the default configuration and modify it for your needs
> cp config.mk.default config.mk
> nano config.mk
Test whether everything works correctly with
......@@ -37,8 +42,18 @@ To install the python bindings run
For more details see <a href="https://www.libxerus.org/building_xerus/">the "Building Xerus" page in the documentation</a>.
Building a conda package
------------------------
Modify `conda/meta.yaml`, `build.sh` and `run_test.sh` as needed and build the conda package.
For example, to create a conda package of the `SALSA` branch of `xerus`, run the following commands.
> cd conda
> conda-build . -c conda-forge --python=3.7 --numpy=1.20
> conda install --use-local xerus_salsa
# Issues #
Issues
======
Should you have any problems with the library, do not hesitate to contact us at contact[at]libxerus.org or to describe your problem in the issue tracker.
......@@ -31,14 +31,7 @@ ln -s ${PREFIX}/include/ ${PREFIX}/include/suitesparse
make python
${PYTHON} -m pip install . --no-deps -vv
# INCLUDE_PATH="${PREFIX}/include"
# mkdir -p ${INCLUDE_PATH}
# cp include/xerus.h ${INCLUDE_PATH}
# cp -r include/xerus ${INCLUDE_PATH}
# LIBRARY_PATH="${PREFIX}/lib"
# mkdir -p ${LIBRARY_PATH}
# cp build/libxerus.so ${LIBRARY_PATH}
# cp build/libxerus_misc.so ${LIBRARY_PATH}
rm config.mk
# cp include/xerus.h ${PREFIX}/include/
# cp -r include/xerus ${PREFIX}/include/
# cp build/libxerus.so ${PREFIX}/lib/
# cp build/libxerus_misc.so ${PREFIX}/lib/
{% set name = "Xerus" %}
{% set version = "4.0.1" %}
{% set branch = "v4.0.1_conda" %}
{% set branch = "SALSA" %}
package:
name: {{ name|lower }}
name: {{ name|lower + '_' + branch|lower }}
version: {{ version }}
source:
......@@ -19,29 +19,36 @@ requirements:
build:
- {{ compiler('cxx') }}
- make
- {{ pin_compatible('python', max_pin='x.x') }}
- pip >=18.1
host:
- python
- numpy
- openblas
- suitesparse
- lapack
- boost-cpp >=1.69.0 # boost-cpp: boost without python
- llvm-openmp # [osx]
- libgomp # [linux]
#- gdb # bfd
#- backward-cpp # option for better python-like backtraces
run:
- {{ pin_compatible('python', max_pin='x.x') }}
- {{ pin_compatible('numpy') }}
- {{ pin_compatible('openblas', max_pin='x.x') }}
- {{ pin_compatible('suitesparse', max_pin='x.x') }}
- {{ pin_compatible('lapack') }}
- {{ pin_compatible('liblapacke') }}
- {{ pin_compatible('boost-cpp', max_pin='x.x.x') }}
- {{ pin_compatible('llvm-openmp') }} # [osx]
- {{ pin_compatible('libgomp') }} # [linux]
run:
- python
- numpy
- openblas
- suitesparse
- lapack
- liblapacke
# - boost-cpp >=1.69.0 # boost-cpp: boost without python
- boost-cpp # boost-cpp: boost without python
- llvm-openmp # [osx]
- libgomp # [linux]
# #- gdb # bfd
# #- backward-cpp # option for better python-like backtraces
# run:
test:
# requires:
# - pytest # pytest xerus/src/pyTests
files:
- VERSION
source_files:
......
......@@ -68,6 +68,7 @@
#include "xerus/algorithms/randomSVD.h"
#include "xerus/applications/uq.h"
#include "xerus/applications/uqAdf.h"
#include "xerus/applications/uqSALSA.h"
#include "xerus/examples/specificLowRankTensors.h"
......
// Xerus - A General Purpose Tensor Library
// Copyright (C) 2014-2019 Benjamin Huber and Sebastian Wolf.
//
// Xerus is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// Xerus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Xerus. If not, see <http://www.gnu.org/licenses/>.
//
// For further information on Xerus visit https://libXerus.org
// or contact us at contact@libXerus.org.
/**
* @file
* @brief Header file for the ADF algorithm and its variants.
*/
#pragma once
#include "../ttNetwork.h"
namespace xerus { namespace uq {
class SALSA {
TTTensor x;
std::vector<std::vector<Tensor>> measures;
std::vector<Tensor> values;
const size_t M;
const size_t N;
const size_t P;
double alpha, omega, smin;
std::pair<size_t, size_t> trainingSet;
std::pair<size_t, size_t> validationSet;
double valueNorm_trainingSet;
double valueNorm_validationSet;
std::vector<std::vector<Tensor>> leftLHSStack; // contains successive contractions of x.T@A.T@A@x
std::vector<std::vector<Tensor>> leftRHSStack; // contains successive contractions of x.T@A.T@b
std::vector<std::vector<Tensor>> rightStack; // contains successive contractions of A@x
std::vector<Tensor> leftRegularizationStack;
std::vector<Tensor> rightRegularizationStack;
std::vector<std::vector<double>> singularValues;
std::vector<double> weightedNorms; //TODO: rename: densities
std::vector<bool> maxIRstepsReached;
bool initialized = false;
public:
double controlSetFraction = 0.1;
// Convergence parameters
double targetResidual = 1e-8;
// Stagnation/Divergence parameters
double minDecrease = 1e-3;
size_t maxSweeps = 1000;
size_t trackingPeriodLength = 10;
size_t maxStagnatingEpochs = 10;
// Inactive rank parameters
size_t kmin = 2;
std::vector<size_t> maxRanks;
// IRLS parameters
size_t maxIRsteps = 3;
double IRtolerance = 0.05;
double sparsityThreshold = 1e-4;
// SALSA parameters
double fomega = 1.05;
double omegaFactor = 1;
// LASSO parameters
double falpha = 1.05;
double alphaFactor = 1;
std::vector<Tensor> basisWeights;
/* // Reweighting parameters */
/* std::vector<double> weights; */
double initialResidual; //TODO: rename
size_t bestIteration;
struct State {
double alpha;
double omega;
TTTensor x;
double trainingResidual;
double validationResidual;
} bestState;
SALSA(const TTTensor& _x, const std::vector<Tensor>& _measures, const Tensor& _values);
void run();
private:
void move_core_left(const bool adapt);
void move_core_right(const bool adapt);
void calc_left_stack(const size_t _position);
void calc_right_stack(const size_t _position);
void adapt_rank(Tensor& _U, Tensor& _S, Tensor& _Vt, const size_t _maxRank, const double _threshold) const;
double residual(const std::pair<size_t, size_t>& _slice) const;
double slow_residual(const std::pair<size_t, size_t>& _slice) const;
std::pair<Tensor, Tensor> ls_operator_and_rhs(const std::pair<size_t, size_t>& _slice) const;
Tensor omega_operator() const;
Tensor alpha_operator() const;
void solve_local();
void print_parameters() const;
void initialize();
std::string print_fractional_ranks() const;
std::string print_densities() const;
};
}}
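Based only on the interface declared above, driving the solver from C++ might look as follows. This is a sketch: the constructor signature, `run()` and the listed members are taken from the header, but the layout of the measurement data is an assumption not documented by this diff.

```cpp
// Hypothetical driver sketch for the SALSA solver declared above.
xerus::TTTensor x = xerus::TTTensor::random({10, 5, 5}, {2, 2});  // initial guess
std::vector<xerus::Tensor> measures;  // per-sample measurements (assumed layout)
xerus::Tensor values;                 // corresponding sample values (assumed layout)

xerus::uq::SALSA salsa(x, measures, values);
salsa.maxSweeps = 200;        // cap the number of optimization sweeps
salsa.targetResidual = 1e-6;  // convergence threshold on the residual
salsa.run();
// salsa.bestState records the best iterate x together with its
// training and validation residuals.
```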
......@@ -308,11 +308,11 @@ namespace xerus { namespace uq { namespace impl_uqRaAdf {
Tensor delta(x.get_core(_setId).dimensions);
Tensor dyadComp, tmp;
#pragma omp declare reduction(+: Tensor: omp_out += omp_in) initializer(omp_priv = Tensor(omp_orig.dimensions))
if(_corePosition > 0) {
const Tensor shuffledX = reshuffle(x.get_core(_setId), {1, 0, 2});
//TODO: schedule, threadprivate(dyadComp, tmp)
#pragma omp declare reduction(+: Tensor: omp_out += omp_in) initializer(omp_priv = Tensor(omp_orig.dimensions))
#pragma omp parallel for reduction(+: delta) firstprivate(_corePosition, _setId, dyadComp, tmp, shuffledX) default(none)
for(size_t jIdx = 0; jIdx < sets[_setId].size(); ++jIdx) {
const size_t j = sets[_setId][jIdx];
......@@ -350,8 +350,6 @@ namespace xerus { namespace uq { namespace impl_uqRaAdf {
Tensor shuffledX = x.get_core(_setId);
shuffledX.reinterpret_dimensions({shuffledX.dimensions[1], shuffledX.dimensions[2]});
//TODO: schedule, threadprivate(dyadComp, tmp)
#pragma omp declare reduction(+: Tensor: omp_out += omp_in) initializer(omp_priv = Tensor(omp_orig.dimensions))
#pragma omp parallel for reduction(+: delta) firstprivate(_corePosition, _setId, dyadComp, tmp, shuffledX) default(none)
for(size_t jIdx = 0; jIdx < sets[_setId].size(); ++jIdx) {
const size_t j = sets[_setId][jIdx];
......
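The two hunks above consolidate the `declare reduction` pragma so it is declared once before use; redeclaring the same reduction identifier for the same type in one scope is rejected by at least some OpenMP compilers. A minimal, self-contained sketch of the mechanism, using a hypothetical `Vec` type in place of `Tensor`:

```cpp
#include <cstdio>
#include <vector>

struct Vec {
    std::vector<double> data;
    explicit Vec(size_t n = 0) : data(n, 0.0) {}
    Vec& operator+=(const Vec& o) {
        for (size_t i = 0; i < data.size(); ++i) data[i] += o.data[i];
        return *this;
    }
};

// Each thread gets a private Vec sized like the original (the initializer
// clause) and the per-thread copies are merged with operator+=.
#pragma omp declare reduction(+: Vec: omp_out += omp_in) \
    initializer(omp_priv = Vec(omp_orig.data.size()))

int main() {
    Vec acc(4);
    #pragma omp parallel for reduction(+: acc)
    for (int j = 0; j < 1000; ++j) acc.data[j % 4] += 1.0;
    std::printf("%.0f %.0f %.0f %.0f\n",
                acc.data[0], acc.data[1], acc.data[2], acc.data[3]);  // 250 each
    return 0;
}
```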
......@@ -223,7 +223,7 @@ namespace xerus {
double work = 0;
lapack_int lwork = -1;
lapack_int min = std::min(m,n);
dgesdd_( &job, &n, &m, nullptr, &n, nullptr, nullptr, &n, nullptr, &min, &work, &lwork, nullptr, &info );
dgesdd_( &job, &n, &m, nullptr, &n, nullptr, nullptr, &n, nullptr, &min, &work, &lwork, nullptr, &info, 1 );
REQUIRE(info == 0, "work array size query of dgesdd returned " << info);
return lapack_int(work);
}
......@@ -236,7 +236,7 @@ namespace xerus {
lapack_int min = std::min(m,n);
// if A = U*S*V^T, then A^T = V*S^T*U^T, so instead of transposing all input and output matrices we can simply exchange the order of U and Vt
dgesdd_( &job, &n, &m, a, &n, s, vt, &n, u, &min, work, &lwork, iwork, &info );
dgesdd_( &job, &n, &m, a, &n, s, vt, &n, u, &min, work, &lwork, iwork, &info, 1 );
REQUIRE(info == 0, "dgesdd failed with info " << info);
}
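The trailing `1` added to both calls is the hidden length of the Fortran `CHARACTER` argument `job`: Fortran compilers append one hidden length argument per character dummy argument after the visible argument list, and omitting it relies on unspecified behaviour. A hedged prototype sketch (the hidden-argument type is ABI-dependent; gfortran switched from `int` to `size_t` with GCC 8):

```cpp
#include <cstddef>

using lapack_int = int;  // assumption: matches the build's LAPACK integer width

// Sketch of a dgesdd_ prototype with the hidden length argument appended;
// the actual declaration used by xerus may differ in const-qualification.
extern "C" void dgesdd_(const char* jobz,
                        const lapack_int* m, const lapack_int* n,
                        double* a, const lapack_int* lda,
                        double* s,
                        double* u, const lapack_int* ldu,
                        double* vt, const lapack_int* ldvt,
                        double* work, const lapack_int* lwork,
                        lapack_int* iwork, lapack_int* info,
                        std::size_t jobz_len);  // hidden: length of 'jobz'
```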
......
......@@ -54,15 +54,15 @@ void expose_indexedTensors(module& m) {
}, keep_alive<0, 1>(), keep_alive<0, 2>(), return_value_policy::take_ownership)
class_<internal::IndexedTensorReadOnly<TensorNetwork>>(m,"IndexedTensorNetworkReadOnly")
ADD_MOVE_AND_RESULT_PTR("__add__", +, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__add__", +, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__sub__", -, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__sub__", -, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorReadOnly<TensorNetwork>, value_t, IndexedTensorReadOnly<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__rmul__", *, IndexedTensorReadOnly<TensorNetwork>, value_t, IndexedTensorReadOnly<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__truediv__", /, IndexedTensorReadOnly<TensorNetwork>, value_t, IndexedTensorReadOnly<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__add__", +, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__add__", +, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__sub__", -, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__sub__", -, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorReadOnly<TensorNetwork>, value_t, IndexedTensorReadOnly<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__rmul__", *, IndexedTensorReadOnly<TensorNetwork>, value_t, IndexedTensorReadOnly<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__truediv__", /, IndexedTensorReadOnly<TensorNetwork>, value_t, IndexedTensorReadOnly<TensorNetwork>)
.def("frob_norm", static_cast<value_t (*)(const IndexedTensorReadOnly<TensorNetwork> &)>(&frob_norm<TensorNetwork>))
.def("__float__", [](const IndexedTensorReadOnly<TensorNetwork> &_self){ return value_t(_self); })
;
......@@ -80,14 +80,14 @@ void expose_indexedTensors(module& m) {
;
class_<internal::IndexedTensorReadOnly<Tensor>>(m,"IndexedTensorReadOnly")
ADD_MOVE_AND_RESULT_PTR("__add__", +, IndexedTensorReadOnly<Tensor>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<Tensor>)
ADD_MOVE_AND_RESULT_PTR("__sub__", -, IndexedTensorReadOnly<Tensor>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<Tensor>)
ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorReadOnly<Tensor>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorReadOnly<Tensor>, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorReadOnly<Tensor>, value_t, IndexedTensorReadOnly<Tensor>)
ADD_MOVE_AND_RESULT_PTR("__rmul__", *, IndexedTensorReadOnly<Tensor>, value_t, IndexedTensorReadOnly<Tensor>)
ADD_MOVE_AND_RESULT_PTR("__truediv__", /, IndexedTensorReadOnly<Tensor>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<Tensor>)
ADD_MOVE_AND_RESULT_PTR("__truediv__", /, IndexedTensorReadOnly<Tensor>, value_t, IndexedTensorMoveable<Tensor>)
ADD_MOVE_AND_RESULT_PTR("__add__", +, IndexedTensorReadOnly<Tensor>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<Tensor>)
ADD_MOVE_AND_RESULT_PTR("__sub__", -, IndexedTensorReadOnly<Tensor>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<Tensor>)
ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorReadOnly<Tensor>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorReadOnly<Tensor>, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorReadOnly<Tensor>, value_t, IndexedTensorReadOnly<Tensor>)
ADD_MOVE_AND_RESULT_PTR("__rmul__", *, IndexedTensorReadOnly<Tensor>, value_t, IndexedTensorReadOnly<Tensor>)
ADD_MOVE_AND_RESULT_PTR("__truediv__", /, IndexedTensorReadOnly<Tensor>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<Tensor>)
ADD_MOVE_AND_RESULT_PTR("__truediv__", /, IndexedTensorReadOnly<Tensor>, value_t, IndexedTensorMoveable<Tensor>)
.def("frob_norm", static_cast<value_t (*)(const IndexedTensorReadOnly<Tensor> &)>(&frob_norm<Tensor>))
.def("__float__", [](const IndexedTensorReadOnly<Tensor> &_self){ return value_t(_self); })
;
......
......@@ -33,28 +33,29 @@ void expose_misc(module& m) {
.value("TSV", misc::FileFormat::TSV)
;
/* m.def("save_to_file", static_cast<void (*)(const Tensor&)>(&misc::save_to_file), arg("object"), arg("filename"), arg("format")=misc::FileFormat::BINARY); */
/* m.def("save_to_file", static_cast<void (*)(const TTTensor&)>(&misc::save_to_file), arg("object"), arg("filename"), arg("format")=misc::FileFormat::BINARY); */
/* m.def("save_to_file", static_cast<void (*)(const TTOperator&)>(&misc::save_to_file), arg("object"), arg("filename"), arg("format")=misc::FileFormat::BINARY); */
/* m.def("save_to_file", static_cast<void (*)(const TensorNetwork&)>(&misc::save_to_file), arg("object"), arg("filename"), arg("format")=misc::FileFormat::BINARY); */
m.def("save_to_file", +[](const Tensor &_obj, const std::string &_filename, misc::FileFormat _format){
PyErr_WarnEx(PyExc_DeprecationWarning, "save_to_file() is deprecated, use Python's pickle library instead.", 1);
misc::save_to_file(_obj, _filename, _format);
}, arg("object"), arg("filename"), arg("format")=misc::FileFormat::BINARY);
m.def("save_to_file", +[](const TensorNetwork &_obj, const std::string &_filename, misc::FileFormat _format){
PyErr_WarnEx(PyExc_DeprecationWarning, "save_to_file() is deprecated, use Python's pickle library instead.", 1);
misc::save_to_file(_obj, _filename, _format);
}, arg("object"), arg("filename"), arg("format")=misc::FileFormat::BINARY);
m.def("save_to_file", +[](const TTTensor &_obj, const std::string &_filename, misc::FileFormat _format){
PyErr_WarnEx(PyExc_DeprecationWarning, "save_to_file() is deprecated, use Python's pickle library instead.", 1);
misc::save_to_file(_obj, _filename, _format);
}, arg("object"), arg("filename"), arg("format")=misc::FileFormat::BINARY);
m.def("save_to_file", +[](const TTOperator &_obj, const std::string &_filename, misc::FileFormat _format){
PyErr_WarnEx(PyExc_DeprecationWarning, "save_to_file() is deprecated, use Python's pickle library instead.", 1);
misc::save_to_file(_obj, _filename, _format);
}, arg("object"), arg("filename"), arg("format")=misc::FileFormat::BINARY);
m.def("load_from_file", +[](std::string _filename){
// determine type stored in the file
PyErr_WarnEx(PyExc_DeprecationWarning, "load_from_file() is deprecated, use Python's pickle library instead.", 1);
std::ifstream in(_filename);
if (!in) {
throw std::runtime_error("could not read file '" + _filename + "'");
......
......@@ -154,5 +154,40 @@ void expose_recoveryAlgorithms(module& m) {
.value("Hermite", uq::PolynomBasis::Hermite)
.value("Legendre", uq::PolynomBasis::Legendre)
;
class_<uq::SALSA::State>(m, "uqSALSAState")
.def_readonly("alpha", &uq::SALSA::State::alpha)
.def_readonly("omega", &uq::SALSA::State::omega)
.def_readonly("x", &uq::SALSA::State::x)
.def_readonly("bestTrainingResidual", &uq::SALSA::State::trainingResidual)
.def_readonly("bestValidationResidual", &uq::SALSA::State::validationResidual)
;
class_<uq::SALSA>(m, "uqSALSA")
.def(init<TTTensor, std::vector<Tensor>, Tensor>())
.def_readwrite("controlSetFraction", &uq::SALSA::controlSetFraction)
.def_readwrite("targetResidual", &uq::SALSA::targetResidual)
.def_readwrite("minDecrease", &uq::SALSA::minDecrease)
.def_readwrite("maxSweeps", &uq::SALSA::maxSweeps)
.def_readwrite("trackingPeriodLength", &uq::SALSA::trackingPeriodLength)
.def_readwrite("maxStagnatingEpochs", &uq::SALSA::maxStagnatingEpochs)
.def_readwrite("kmin", &uq::SALSA::kmin)
.def_readwrite("maxRanks", &uq::SALSA::maxRanks)
.def_readwrite("maxIRsteps", &uq::SALSA::maxIRsteps)
.def_readwrite("IRtolerance", &uq::SALSA::IRtolerance)
.def_readwrite("sparsityThreshold", &uq::SALSA::sparsityThreshold)
.def_readwrite("fomega", &uq::SALSA::fomega)
.def_readwrite("omegaFactor", &uq::SALSA::omegaFactor)
.def_readwrite("falpha", &uq::SALSA::falpha)
.def_readwrite("alphaFactor", &uq::SALSA::alphaFactor)
.def_readwrite("basisWeights", &uq::SALSA::basisWeights)
.def_readonly("initialResidual", &uq::SALSA::initialResidual)
.def_readonly("bestIteration", &uq::SALSA::bestIteration)
.def_readonly("bestState", &uq::SALSA::bestState)
.def("run", &uq::SALSA::run)
;
}
......@@ -23,12 +23,9 @@ Tensor Tensor_from_buffer(buffer& _b) {
return Tensor({}, Tensor::Representation::Dense, Tensor::Initialisation::None);
}
std::vector<size_t> dims(info.shape.begin(), info.shape.end());
std::vector<size_t> strides(info.strides.begin(), info.strides.end());
Tensor result(dims, Tensor::Representation::Dense, Tensor::Initialisation::None);
std::vector<size_t> dimensions(info.shape.begin(), info.shape.end());
Tensor result(dimensions, Tensor::Representation::Dense, Tensor::Initialisation::None);
misc::copy(result.get_unsanitized_dense_data(), static_cast<double*>(info.ptr), result.size);
return result;
}
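Note that the rewritten `Tensor_from_buffer` no longer consults `info.strides`, so the flat `misc::copy` implicitly assumes a C-contiguous `float64` buffer. With pybind11 that assumption can be enforced at the signature level; a sketch with hypothetical names:

```cpp
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
namespace py = pybind11;

// Hypothetical guard sketch: c_style | forcecast makes pybind11 convert any
// incoming array to a C-contiguous float64 array before the body runs, so a
// flat element-by-element copy is always valid.
void take_dense(py::array_t<double, py::array::c_style | py::array::forcecast> a) {
    const double* data = a.data();  // contiguous, row-major, float64
    auto n = a.size();              // number of elements to copy into a Tensor
    (void)data; (void)n;
}
```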
......@@ -43,16 +40,17 @@ void expose_tensor(module& m) {
;
class_<Tensor>(m, "Tensor", "a non-decomposed Tensor in either sparse or dense representation", buffer_protocol())
.def_buffer([](Tensor& t) -> buffer_info {
return buffer_info(
t.get_dense_data(), /* Pointer to buffer */
sizeof(value_t), /* Size of one scalar */
format_descriptor<value_t>::format(), /* Python struct-style format descriptor */
t.order(), /* Number of dimensions */
t.dimensions, /* Buffer dimensions */
strides_from_dimensions_and_item_size(t.dimensions, sizeof(value_t)) /* Strides (in bytes) for each index */
);
})
// .def_buffer([](Tensor& t) -> buffer_info {
// std::cerr << "buffer" << std::endl;
// return buffer_info(
// t.get_dense_data(), /* Pointer to buffer */
// sizeof(value_t), /* Size of one scalar */
// format_descriptor<value_t>::format(), /* Python struct-style format descriptor */
// t.order(), /* Number of dimensions */
// t.dimensions, /* Buffer dimensions */
// strides_from_dimensions_and_item_size(t.dimensions, sizeof(value_t)) /* Strides (in bytes) for each index */
// );
// })
.def(pickle(
[](const Tensor &_self) { // __getstate__
return bytes(misc::serialize(_self));
......@@ -76,6 +74,15 @@ void expose_tensor(module& m) {
return Tensor(_dim, _f);
})
.def_static("from_buffer", &Tensor_from_buffer)
.def_static("from_ndarray", &Tensor_from_buffer)
.def("to_ndarray", +[](const xerus::Tensor& _self){
static_assert(std::is_same<value_t, double>::value);
Tensor* shcp = new xerus::Tensor(_self); // create a shallow copy (this effectively increases its reference count for the required amount of time)
shcp->ensure_own_data_and_apply_factor();
shcp->use_dense_representation();
capsule cps(shcp, [](void *v) { delete reinterpret_cast<xerus::Tensor*>(v); });
return array(dtype::of<double>(), shcp->dimensions, shcp->get_unsanitized_dense_data(), cps);
})
.def_property_readonly("dimensions", +[](Tensor &_A) {
return _A.dimensions;
})
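The new `to_ndarray` uses the standard pybind11 capsule idiom: numpy receives a base object whose destructor releases the C++ owner, so the returned array can safely outlive the Python-side `Tensor`. A stripped-down sketch of the same pattern with a hypothetical owner type:

```cpp
#include <vector>
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
namespace py = pybind11;

// Minimal capsule sketch: heap-allocate the data owner, register a deleter as
// the capsule destructor, and pass the capsule as the array's base so the
// owner is freed exactly when the last numpy view is garbage collected.
py::array make_view() {
    auto* owner = new std::vector<double>{1.0, 2.0, 3.0};  // hypothetical owner
    py::capsule base(owner, [](void* p) {
        delete static_cast<std::vector<double>*>(p);
    });
    return py::array(py::dtype::of<double>(), {owner->size()}, owner->data(), base);
}
```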
......@@ -193,7 +200,7 @@ arg("dim")
.def("__str__", &Tensor::to_string)
/* .def(-self) */
.def("__neg__",
+[](TTTensor& _self) {
+[](Tensor& _self) {
return (-1)*_self;
})
.def(self + self)
......@@ -206,7 +213,7 @@ arg("dim")
.def(self / value_t())
/* .def(self /= self) */
.def("__itruediv__",
+[](TTTensor& _self, const value_t _other) {
+[](Tensor& _self, const value_t _other) {
return (_self *= (1/_other));
})
......
......@@ -3,10 +3,10 @@
void expose_tensorNetwork(module& m) {
class_<TensorNetwork>(m, "TensorNetwork")
.def(pickle(
[](const TensorNetwork &_self) { // __getstate__
+[](const TensorNetwork &_self) { // __getstate__
return bytes(misc::serialize(_self));
},
[](bytes _bytes) { // __setstate__
+[](bytes _bytes) { // __setstate__
return misc::deserialize<TensorNetwork>(_bytes);
}
))
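The only change in this hunk is the unary `+` in front of the captureless `__getstate__`/`__setstate__` lambdas, presumably to select a function-pointer overload: unary plus forces the closure to decay to a plain function pointer, which can steer overload resolution when a binding helper accepts both functors and function pointers. A two-line illustration:

```cpp
// Without '+' the expression has a unique unnamed closure type; with '+' a
// captureless lambda converts to an ordinary function pointer.
auto closure = [](int x) { return 2 * x; };       // unnamed closure type
int (*fptr)(int) = +[](int x) { return 2 * x; };  // int (*)(int)
```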
......
......@@ -3,7 +3,7 @@
void expose_ttnetwork(module& m) {
class_<TTTensor, TensorNetwork>(m, "TTTensor")
.def(pickle(
[](const TTTensor &_self) { // __getstate__
[](const TTTensor& _self) { // __getstate__
return bytes(misc::serialize(_self));
},
[](bytes _bytes) { // __setstate__
......@@ -11,7 +11,7 @@ void expose_ttnetwork(module& m) {
}
))
.def(init<>(), "constructs an empty TTTensor")
.def(init<const TTTensor &>())
.def(init<const TTTensor&>())
.def(init<const Tensor&>())
.def(init<const Tensor&, value_t>())
.def(init<const Tensor&, value_t, size_t>())
......@@ -79,6 +79,14 @@ void expose_ttnetwork(module& m) {
m.def("dyadic_product", static_cast<TTTensor (*)(const std::vector<TTTensor> &)>(&dyadic_product));
class_<TTOperator, TensorNetwork>(m, "TTOperator")
.def(pickle(
[](const TTOperator& _self) { // __getstate__