Commit 7ed0512a authored by Philipp  Trunschke's avatar Philipp Trunschke
Browse files

bugfix

parent a79de160
Pipeline #1551 failed with stages
in 19 minutes and 7 seconds
......@@ -110,7 +110,7 @@ namespace xerus { namespace misc {
T deserialize(const std::string& _bytes) {
T obj;
std::string header = _bytes.substr(0, _bytes.find("\n"));
XERUS_REQUIRE(header == std::string("Xerus ") + misc::demangle_cxa(typeid(T).name()) + " serialized object.", "Invalid binary input. DBG: " << header);
XERUS_REQUIRE(header == std::string("Xerus ") + misc::demangle_cxa(typeid(T).name()) + " serialized object.", "Invalid binary input. \nExpected: \"" << std::string("Xerus ") + misc::demangle_cxa(typeid(T).name()) + " serialized object.\"\nGot: \"" << header << "\"");
std::stringstream stream(_bytes.substr(_bytes.find("\n")+1)); // +1 because of the last \n
XERUS_REQUIRE(stream, "Unexpected end of stream in load_from_file().");
stream_reader(stream, obj, FileFormat::BINARY);
......
......@@ -29,31 +29,31 @@
using namespace internal;

/// Exposes the internal BlockTT class (block Tensor-Train representation)
/// to Python via Boost.Python: constructors, component access, rank queries,
/// core movement and Frobenius-norm computation.
void expose_blocktt() {
	VECTOR_TO_PY(BlockTT, "BlockTTVector");
	class_<BlockTT>("BlockTT")
		.def(init<const std::vector<size_t>&, const std::vector<size_t>&, const size_t, const size_t>())
		.def(init<const TTTensor&, const size_t, const size_t>())
		// Return a copy: the const reference points into the BlockTT's internal storage.
		.def("get_component", &BlockTT::get_component, return_value_policy<copy_const_reference>())
		.def("set_component", &BlockTT::set_component)
		.def("ranks", &BlockTT::ranks)
		.def("rank", &BlockTT::rank)
		.def("num_components", &BlockTT::num_components)
		.def("get_core", &BlockTT::get_core)
		.def("get_average_core", &BlockTT::get_average_core)
		.def("get_average_tt", &BlockTT::get_average_tt)
		.def("order", &BlockTT::order)
		// Disambiguate the overload set of move_core via static_cast, then
		// give the epsilon/maxRank arguments Python-side defaults.
		.def("move_core", static_cast<void (BlockTT::*)(const size_t, const double, const size_t)>(&BlockTT::move_core),
			(arg("position"), arg("epsilon")=EPSILON, arg("maxRank")=std::numeric_limits<size_t>::max())
		)
		.def("average_core", &BlockTT::average_core)
		.def("all_entries_valid", &BlockTT::all_entries_valid)
		.def("frob_norm", &BlockTT::frob_norm)
		.def("dofs", &BlockTT::dofs)
		.def("move_core_left", &BlockTT::move_core_left)
		// BUG FIX: was bound to &BlockTT::move_core_left (copy-paste error),
		// so calling move_core_right() from Python moved the core LEFT.
		.def("move_core_right", &BlockTT::move_core_right)
		;
	// Free-function Frobenius norm overload taking a BlockTT.
	def("frob_norm", static_cast<value_t (*)(const BlockTT&)>(&frob_norm));
}
......@@ -79,9 +79,11 @@ void expose_misc(module& m) {
});
m.def("serialize", +[](const Tensor &_obj){ return bytes(misc::serialize(_obj)); }, arg("object"));
m.def("serialize", +[](const TensorNetwork &_obj){ return bytes(misc::serialize(_obj)); }, arg("object"));
m.def("serialize", +[](const TTTensor &_obj){ return bytes(misc::serialize(_obj)); }, arg("object"));
m.def("serialize", +[](const TTOperator &_obj){ return bytes(misc::serialize(_obj)); }, arg("object"));
m.def("serialize", +[](const HTTensor &_obj){ return bytes(misc::serialize(_obj)); }, arg("object"));
m.def("serialize", +[](const HTOperator &_obj){ return bytes(misc::serialize(_obj)); }, arg("object"));
m.def("serialize", +[](const TensorNetwork &_obj){ return bytes(misc::serialize(_obj)); }, arg("object"));
m.def("deserialize", +[](std::string _bytes){
// determine type stored in the file
......
......@@ -26,7 +26,20 @@
PYBIND11_MODULE(xerus, m) {
// m.doc() = "...";
m.doc() = "\
The `xerus` library is a general purpose library for numerical calculations with higher order tensors, Tensor-Train Decompositions / Matrix Product States and general Tensor Networks.\n\
The focus of development was the simple usability and adaptibility to any setting that requires higher order tensors or decompositions thereof.\n\
\n\
The key features include:\n\
* Modern code and concepts incorporating many features of the `C++11` standard.\n\
* Full python bindings with very similar syntax for easy transitions from and to c++.\n\
* Calculation with tensors of arbitrary orders using an intuitive Einstein-like notation `A(i,j) = B(i,k,l) * C(k,j,l);`.\n\
* Full implementation of the Tensor-Train decompositions (MPS) with all neccessary capabilities (including Algorithms like ALS, ADF and CG).\n\
* Lazy evaluation of (multiple) tensor contractions featuring heuristics to automatically find efficient contraction orders.\n\
* Direct integration of the `blas` and `lapack`, as high performance linear algebra backends.\n\
* Fast sparse tensor calculation by usage of the `suiteSparse` sparse matrix capabilities.\n\
* Capabilites to handle arbitrary Tensor Networks.\n\
";
// xerus version
m.attr("VERSION_MAJOR") = VERSION_MAJOR;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment