Commit 109e2476 authored by Philipp Trunschke

fix segfault when creating numpy arrays

parent 6720daac
Pipeline #921 passed with stages in 9 minutes and 2 seconds
// Xerus - A General Purpose Tensor Library
// Copyright (C) 2014-2018 Benjamin Huber and Sebastian Wolf.
//
// Xerus is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// Xerus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Xerus. If not, see <http://www.gnu.org/licenses/>.
//
// For further information on Xerus visit https://libXerus.org
// or contact us at contact@libXerus.org.
/**
@@ -23,6 +23,7 @@
*/
#define NO_IMPORT_ARRAY
#include "misc.h"
using namespace internal;
@@ -40,31 +41,31 @@ void expose_factorizations() {
}, return_value_policy<manage_new_object, // result is treated as a new object
with_custodian_and_ward_postcall<0,1>>(), // but the argument will not be destroyed before the result is destroyed
(arg("source"), arg("maxRank")=std::numeric_limits<size_t>::max(), arg("eps")=EPSILON)
);
class_<QR, bases<TensorFactorisation>, boost::noncopyable>("QR_temporary", boost::python::no_init);
def("QR", +[](IndexedTensor<Tensor> &_rhs)->TensorFactorisation*{
return new QR(std::move(_rhs));
}, return_value_policy<manage_new_object, // result is treated as a new object
with_custodian_and_ward_postcall<0,1>>()); // but the argument will not be destroyed before the result is destroyed
class_<RQ, bases<TensorFactorisation>, boost::noncopyable>("RQ_temporary", boost::python::no_init);
def("RQ", +[](IndexedTensor<Tensor> &_rhs)->TensorFactorisation*{
return new RQ(std::move(_rhs));
}, return_value_policy<manage_new_object, // result is treated as a new object
with_custodian_and_ward_postcall<0,1>>()); // but the argument will not be destroyed before the result is destroyed
class_<QC, bases<TensorFactorisation>, boost::noncopyable>("QC_temporary", boost::python::no_init);
def("QC", +[](IndexedTensor<Tensor> &_rhs)->TensorFactorisation*{
return new QC(std::move(_rhs));
}, return_value_policy<manage_new_object, // result is treated as a new object
with_custodian_and_ward_postcall<0,1>>()); // but the argument will not be destroyed before the result is destroyed
class_<CQ, bases<TensorFactorisation>, boost::noncopyable>("CQ_temporary", boost::python::no_init);
def("CQ", +[](IndexedTensor<Tensor> &_rhs)->TensorFactorisation*{
return new CQ(std::move(_rhs));
}, return_value_policy<manage_new_object, // result is treated as a new object
with_custodian_and_ward_postcall<0,1>>()); // but the argument will not be destroyed before the result is destroyed
}
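Editor's note: the return-value policy used throughout expose_factorizations combines manage_new_object (Python takes ownership of the heap-allocated TensorFactorisation) with with_custodian_and_ward_postcall<0,1> (postcall argument 1, the ward, is kept alive at least as long as result 0, the custodian). A minimal self-contained sketch of the same lifetime-coupling pattern, using hypothetical Source/View types that are not part of xerus:

#include <boost/python.hpp>

struct Source { int value = 42; };

// View borrows from Source, so Source must not be destroyed first.
struct View {
    explicit View(Source& s) : src(&s) {}
    int get() const { return src->value; }
    Source* src;
};

BOOST_PYTHON_MODULE(ward_demo) {
    using namespace boost::python;
    class_<Source>("Source");
    class_<View, boost::noncopyable>("View", no_init)
        .def("get", &View::get);
    // Result 0 is the custodian, argument 1 is the ward: Python keeps the
    // Source alive until the View it handed out is garbage collected.
    def("make_view", +[](Source& s) { return new View(s); },
        return_value_policy<manage_new_object,
                            with_custodian_and_ward_postcall<0, 1>>());
}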
// Xerus - A General Purpose Tensor Library
// Copyright (C) 2014-2018 Benjamin Huber and Sebastian Wolf.
//
// Xerus is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// Xerus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Xerus. If not, see <http://www.gnu.org/licenses/>.
//
// For further information on Xerus visit https://libXerus.org
// or contact us at contact@libXerus.org.
/**
@@ -23,6 +23,7 @@
*/
#define NO_IMPORT_ARRAY
#include "misc.h"
void expose_indexedTensors() {
@@ -47,7 +48,7 @@ void expose_indexedTensors() {
" i += 1\n"
, scope().attr("__dict__"));
VECTOR_TO_PY(Index, "IndexVector");
implicitly_convertible<internal::IndexedTensorReadOnly<Tensor>, internal::IndexedTensorMoveable<TensorNetwork>>();
implicitly_convertible<internal::IndexedTensorWritable<Tensor>, internal::IndexedTensorMoveable<TensorNetwork>>();
implicitly_convertible<internal::IndexedTensorMoveable<Tensor>, internal::IndexedTensorMoveable<TensorNetwork>>();
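Editor's note: implicitly_convertible<Source, Target>() registers Target's converting constructor with the Boost.Python registry, so wrapped functions expecting a Target also accept a Source coming from Python — this is what lets an indexed Tensor expression appear wherever an indexed TensorNetwork expression is expected. The mechanism in isolation, with toy Celsius/Fahrenheit types that are not part of xerus:

#include <boost/python.hpp>

struct Celsius { double deg; };
struct Fahrenheit {
    // Converting constructor: this is what implicitly_convertible invokes.
    Fahrenheit(const Celsius& c) : deg(c.deg * 9.0 / 5.0 + 32.0) {}
    double deg;
};

double read_f(const Fahrenheit& f) { return f.deg; }

BOOST_PYTHON_MODULE(convert_demo) {
    using namespace boost::python;
    class_<Celsius>("Celsius").def_readwrite("deg", &Celsius::deg);
    class_<Fahrenheit>("Fahrenheit", init<const Celsius&>());
    // After this registration, read_f(Celsius(...)) also works from Python.
    implicitly_convertible<Celsius, Fahrenheit>();
    def("read_f", read_f);
}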
@@ -64,7 +65,7 @@ void expose_indexedTensors() {
LOG(pydebug, "python wrapper: " name);\
return new res_type(std::move(_l) op std::move(_r)); \
}, return_value_policy<manage_new_object>())
class_<internal::IndexedTensorReadOnly<TensorNetwork>, boost::noncopyable>("IndexedTensorNetworkReadOnly", no_init)
ADD_MOVE_AND_RESULT_PTR("__add__", +, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__add__", +, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<TensorNetwork>)
@@ -75,17 +76,17 @@ void expose_indexedTensors() {
ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorMoveable<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<TensorNetwork>)
.def("__mul__",
.def("__mul__",
+[](internal::IndexedTensorReadOnly<TensorNetwork> &_l, value_t _r) -> internal::IndexedTensorReadOnly<TensorNetwork>* {
LOG(pydebug, "mul TN ro * scalar");
return new internal::IndexedTensorMoveable<TensorNetwork>(std::move(_l) * _r);
}, return_value_policy<manage_new_object>())
.def("__rmul__",
.def("__rmul__",
+[](value_t _r, internal::IndexedTensorReadOnly<TensorNetwork> &_l) -> internal::IndexedTensorReadOnly<TensorNetwork>* {
LOG(pydebug, "mul TN scalar * ro");
return new internal::IndexedTensorMoveable<TensorNetwork>(std::move(_l) * _r);
}, return_value_policy<manage_new_object>())
.def("__div__",
.def("__div__",
+[](internal::IndexedTensorReadOnly<TensorNetwork> &_l, value_t _r) -> internal::IndexedTensorReadOnly<TensorNetwork>* {
LOG(pydebug, "div TN ro / scalar");
return new internal::IndexedTensorMoveable<TensorNetwork>(std::move(_l) / _r);
@@ -93,41 +94,41 @@ void expose_indexedTensors() {
.def("frob_norm", static_cast<value_t (*)(const IndexedTensorReadOnly<TensorNetwork> &)>(&frob_norm<TensorNetwork>))
.def(float_(self)) // cast to double
;
class_<internal::IndexedTensorWritable<TensorNetwork>, boost::noncopyable, bases<internal::IndexedTensorReadOnly<TensorNetwork>>>("IndexedTensorNetworkWriteable", no_init)
;
class_<internal::IndexedTensorMoveable<TensorNetwork>, boost::noncopyable, bases<internal::IndexedTensorWritable<TensorNetwork>>>("IndexedTensorNetworkMoveable", no_init)
;
class_<internal::IndexedTensor<TensorNetwork>, boost::noncopyable, bases<internal::IndexedTensorWritable<TensorNetwork>>>("IndexedTensorNetwork", no_init)
.def("__lshift__",
.def("__lshift__",
+[](internal::IndexedTensor<TensorNetwork> &_lhs, internal::IndexedTensorReadOnly<Tensor> &_rhs) {
std::move(_lhs) = std::move(_rhs);
})
.def("__lshift__",
.def("__lshift__",
+[](internal::IndexedTensor<TensorNetwork> &_lhs, internal::IndexedTensorReadOnly<TensorNetwork> &_rhs) {
std::move(_lhs) = std::move(_rhs);
})
;
// --------------------------------------------- indexedTensor<Tensor>
class_<internal::IndexedTensorReadOnly<Tensor>, boost::noncopyable>("IndexedTensorReadOnly", no_init)
ADD_MOVE_AND_RESULT_PTR("__add__", +, IndexedTensorReadOnly<Tensor>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<Tensor>)
ADD_MOVE_AND_RESULT_PTR("__sub__", -, IndexedTensorReadOnly<Tensor>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<Tensor>)
ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorReadOnly<Tensor>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorReadOnly<Tensor>, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>)
ADD_MOVE_AND_RESULT_PTR("__div__", /, IndexedTensorReadOnly<Tensor>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<Tensor>)
.def("__mul__",
.def("__mul__",
+[](internal::IndexedTensorReadOnly<Tensor> &_l, value_t _r) -> internal::IndexedTensorReadOnly<Tensor>* {
LOG(pydebug, "mul ro * scalar");
return new internal::IndexedTensorMoveable<Tensor>(std::move(_l) * _r);
}, return_value_policy<manage_new_object>())
.def("__rmul__",
.def("__rmul__",
+[](value_t _r, internal::IndexedTensorReadOnly<Tensor> &_l) -> internal::IndexedTensorReadOnly<Tensor>* {
LOG(pydebug, "mul scalar * ro");
return new internal::IndexedTensorMoveable<Tensor>(std::move(_l) * _r);
}, return_value_policy<manage_new_object>())
.def("__div__",
.def("__div__",
+[](internal::IndexedTensorReadOnly<Tensor> &_l, value_t _r) -> internal::IndexedTensorReadOnly<Tensor>* {
LOG(pydebug, "div ro / scalar");
return new internal::IndexedTensorMoveable<Tensor>(std::move(_l) / _r);
@@ -140,11 +141,11 @@ void expose_indexedTensors() {
class_<internal::IndexedTensorMoveable<Tensor>, boost::noncopyable, bases<internal::IndexedTensorWritable<Tensor>>>("IndexedTensorMoveable", no_init)
;
class_<internal::IndexedTensor<Tensor>, boost::noncopyable, bases<internal::IndexedTensorWritable<Tensor>>>("IndexedTensor", no_init)
.def("__lshift__",
.def("__lshift__",
+[](internal::IndexedTensor<Tensor> &_lhs, internal::IndexedTensorReadOnly<Tensor> &_rhs) {
std::move(_lhs) = std::move(_rhs);
})
.def("__lshift__",
.def("__lshift__",
+[](internal::IndexedTensor<Tensor> &_lhs, internal::IndexedTensorReadOnly<TensorNetwork> &_rhs) {
std::move(_lhs) = std::move(_rhs);
})
...
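Editor's note: on the C++ side the __lshift__ bindings above forward to xerus' indexed assignment, where operator= on indexed expressions triggers the contraction; Python gets << only because = cannot be overloaded there. A sketch of the equivalent native usage, in standard xerus indexed notation:

#include <xerus.h>

int main() {
    xerus::Index i, j, k;
    xerus::Tensor A({2, 4});
    const xerus::Tensor B = xerus::Tensor::random({2, 3});
    const xerus::Tensor C = xerus::Tensor::random({3, 4});
    // In C++ this is a plain assignment of indexed expressions; the
    // Python bindings expose the same operation as A(i,k) << B(i,j) * C(j,k).
    A(i, k) = B(i, j) * C(j, k);
    return 0;
}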
// Xerus - A General Purpose Tensor Library
// Copyright (C) 2014-2018 Benjamin Huber and Sebastian Wolf.
//
// Xerus is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// Xerus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Xerus. If not, see <http://www.gnu.org/licenses/>.
//
// For further information on Xerus visit https://libXerus.org
// or contact us at contact@libXerus.org.
/**
@@ -23,6 +23,7 @@
*/
#define NO_IMPORT_ARRAY
#include "misc.h"
@@ -46,7 +47,7 @@ void expose_misc() {
def("frob_norm", +[](const TensorNetwork& _x){ return _x.frob_norm(); });
def("frob_norm", static_cast<value_t (*)(const IndexedTensorReadOnly<Tensor>&)>(&frob_norm));
def("frob_norm", static_cast<value_t (*)(const IndexedTensorReadOnly<TensorNetwork>&)>(&frob_norm));
def("approx_equal", static_cast<bool (*)(const TensorNetwork&, const TensorNetwork&, double)>(&approx_equal));
def("approx_equal", static_cast<bool (*)(const Tensor&, const TensorNetwork&, double)>(&approx_equal));
def("approx_equal", static_cast<bool (*)(const TensorNetwork&, const Tensor&, double)>(&approx_equal));
@@ -63,32 +64,32 @@ void expose_misc() {
def("approx_equal", +[](const TensorNetwork& _l, const TensorNetwork& _r) {
return approx_equal(_l, _r);
});
def("log", +[](std::string _msg){
LOG_SHORT(info, _msg);
});
enum_<misc::FileFormat>("FileFormat")
.value("BINARY", misc::FileFormat::BINARY)
.value("TSV", misc::FileFormat::TSV)
;
def("save_to_file", +[](const Tensor &_obj, const std::string &_filename, misc::FileFormat _format){
misc::save_to_file(_obj, _filename, _format);
}, (arg("object"), arg("filename"), arg("format")=misc::FileFormat::BINARY) );
def("save_to_file", +[](const TensorNetwork &_obj, const std::string &_filename, misc::FileFormat _format){
misc::save_to_file(_obj, _filename, _format);
}, (arg("object"), arg("filename"), arg("format")=misc::FileFormat::BINARY) );
def("save_to_file", +[](const TTTensor &_obj, const std::string &_filename, misc::FileFormat _format){
misc::save_to_file(_obj, _filename, _format);
}, (arg("object"), arg("filename"), arg("format")=misc::FileFormat::BINARY) );
def("save_to_file", +[](const TTOperator &_obj, const std::string &_filename, misc::FileFormat _format){
misc::save_to_file(_obj, _filename, _format);
}, (arg("object"), arg("filename"), arg("format")=misc::FileFormat::BINARY) );
def("load_from_file", +[](std::string _filename){
// determine type stored in the file
std::ifstream in(_filename);
@@ -114,12 +115,12 @@ void expose_misc() {
LOG_SHORT(warning, "unknown class type '" << classname << "' in file '" << _filename << "'");
return object();
});
// identity returns the cpp name to a python object
// def("identity", identity_);
def("xethrow", +[](){XERUS_THROW(misc::generic_error() << misc::get_call_stack());});
// translate all exceptions thrown inside xerus to own python exception class
static char fully_qualified_gen_error_name[] = "xerus.generic_error";
static PyObject* py_gen_error = PyErr_NewException(fully_qualified_gen_error_name, PyExc_Exception, 0);
...
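Editor's note: the PyErr_NewException call above creates the xerus.generic_error Python type; the truncated remainder of this file presumably pairs it with Boost.Python's exception translation hook. A generic sketch of that pattern, with a hypothetical demo_error standing in for xerus' own translator code:

#include <boost/python.hpp>
#include <stdexcept>

// Hypothetical module-level exception object, mirroring py_gen_error above.
static PyObject* py_demo_error = nullptr;

void translate(const std::runtime_error& e) {
    // Raise the custom Python exception whenever the C++ exception escapes.
    PyErr_SetString(py_demo_error, e.what());
}

void raise_cpp() { throw std::runtime_error("thrown in C++"); }

BOOST_PYTHON_MODULE(error_demo) {
    using namespace boost::python;
    // PyErr_NewException takes a non-const char*, hence the static array.
    static char name[] = "error_demo.demo_error";
    py_demo_error = PyErr_NewException(name, PyExc_Exception, 0);
    scope().attr("demo_error") = handle<>(borrowed(py_demo_error));
    register_exception_translator<std::runtime_error>(&translate);
    def("raise_cpp", raise_cpp);
}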
// Xerus - A General Purpose Tensor Library
// Copyright (C) 2014-2018 Benjamin Huber and Sebastian Wolf.
//
// Xerus is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// Xerus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Xerus. If not, see <http://www.gnu.org/licenses/>.
//
// For further information on Xerus visit https://libXerus.org
// or contact us at contact@libXerus.org.
/**
@@ -33,6 +33,7 @@
#pragma GCC diagnostic ignored "-Wcast-qual"
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wpedantic"
#define PY_ARRAY_UNIQUE_SYMBOL XERUS_ARRAY_API
#include <numpy/ndarrayobject.h>
#pragma GCC diagnostic pop
#include "xerus.h"
...
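Editor's note: these preprocessor lines are the heart of the segfault fix. NumPy's C API is a per-translation-unit table of function pointers named by PY_ARRAY_UNIQUE_SYMBOL, and import_array() fills exactly one such table. Without a shared symbol and NO_IMPORT_ARRAY in the secondary translation units, every binding .cpp gets its own table, only one of which is ever initialized — any PyArray_* call through an uninitialized table dereferences garbage and crashes. The canonical multi-file layout, condensed with illustrative file names:

// misc.h -- shared header: name the single shared API table, then pull in NumPy.
#define PY_ARRAY_UNIQUE_SYMBOL XERUS_ARRAY_API
#include <numpy/ndarrayobject.h>

// main.cpp -- exactly one translation unit owns and initializes the table.
#include <boost/python.hpp>
#include "misc.h"
BOOST_PYTHON_MODULE(demo) {
    // _import_array() is the function form of import_array(); it must run
    // once before any PyArray_* call, or the module segfaults exactly as
    // this commit describes. It sets a Python error on failure.
    if (_import_array() < 0) {
        boost::python::throw_error_already_set();
    }
}

// every_other.cpp -- reuse the shared table instead of defining a new one.
#define NO_IMPORT_ARRAY   // must precede the include of misc.h
#include "misc.h"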
@@ -23,6 +23,7 @@
*/
#define NO_IMPORT_ARRAY
#include "misc.h"
// using namespace uq;
...
// Xerus - A General Purpose Tensor Library
// Copyright (C) 2014-2018 Benjamin Huber and Sebastian Wolf.
//
// Xerus is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// Xerus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Xerus. If not, see <http://www.gnu.org/licenses/>.
//
// For further information on Xerus visit https://libXerus.org
// or contact us at contact@libXerus.org.
/**
@@ -23,6 +23,7 @@
*/
#define NO_IMPORT_ARRAY
#include "misc.h"
void expose_tensorNetwork() {
@@ -76,7 +77,7 @@ void expose_tensorNetwork() {
(arg("from"), arg("to"), arg("allowRankReduction")=true)
)
.def("reduce_representation", &TensorNetwork::reduce_representation)
.def("find_common_edge",
.def("find_common_edge",
+[](TensorNetwork &_this, size_t _nodeA, size_t _nodeB){
const auto result = _this.find_common_edge(_nodeA, _nodeB);
return boost::python::make_tuple(result.first, result.second);
@@ -94,7 +95,7 @@ void expose_tensorNetwork() {
.def("draw", &TensorNetwork::draw)
.def("frob_norm", &TensorNetwork::frob_norm)
;
class_<TensorNetwork::TensorNode>("TensorNode")
.def("size", &TensorNetwork::TensorNode::size)
.def("degree", &TensorNetwork::TensorNode::degree)
@@ -111,7 +112,7 @@ void expose_tensorNetwork() {
return _this.neighbors;
})
;
class_<TensorNetwork::Link>("TensorNetworkLink")
.def_readonly("other", &TensorNetwork::Link::other)
.def_readonly("indexPosition", &TensorNetwork::Link::indexPosition)
...
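Editor's note: find_common_edge returns a std::pair on the C++ side, and Boost.Python has no built-in converter for std::pair, so the lambda above rebuilds the result as a Python tuple via boost::python::make_tuple. The same pattern in isolation, with a toy divmod-style function that is not part of xerus:

#include <boost/python.hpp>
#include <utility>

std::pair<long, long> divmod_cpp(long a, long b) {
    return {a / b, a % b};
}

BOOST_PYTHON_MODULE(pair_demo) {
    using namespace boost::python;
    // Wrap the pair-returning function in a lambda that builds a Python
    // tuple, mirroring the find_common_edge binding above.
    def("divmod_cpp", +[](long a, long b) {
        const auto r = divmod_cpp(a, b);
        return make_tuple(r.first, r.second);
    }, (arg("a"), arg("b")));
}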
// Xerus - A General Purpose Tensor Library
// Copyright (C) 2014-2018 Benjamin Huber and Sebastian Wolf.
//
// Xerus is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// Xerus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Xerus. If not, see <http://www.gnu.org/licenses/>.
//
// For further information on Xerus visit https://libXerus.org
// or contact us at contact@libXerus.org.
/**
@@ -23,12 +23,13 @@
*/
#define NO_IMPORT_ARRAY
#include "misc.h"
void expose_ttnetwork() {
VECTOR_TO_PY(TTTensor, "TTTensorVector");
VECTOR_TO_PY(TTOperator, "TTOperatorVector");
class_<TTTensor, bases<TensorNetwork>>("TTTensor")
.def(init<const Tensor&, optional<value_t, size_t>>())
.def(init<const Tensor&, value_t, TensorNetwork::RankTuple>())
@@ -42,7 +43,7 @@ void expose_ttnetwork() {
.def("ranks", &TTTensor::ranks)
.def("rank", &TTTensor::rank)
// .def("frob_norm", &TTTensor::frob_norm) // NOTE unneccessary because correct call is inherited
.def("random",
.def("random",
+[](std::vector<size_t> _dim, std::vector<size_t> _rank) {
return xerus::TTTensor::random(_dim, _rank);
}).staticmethod("random")
@@ -50,34 +51,34 @@ void expose_ttnetwork() {
.def("kronecker", &TTTensor::kronecker).staticmethod("kronecker")
.def("dirac", static_cast<TTTensor (*)(Tensor::DimensionTuple, const Tensor::MultiIndex&)>(&TTTensor::dirac))
.def("dirac", static_cast<TTTensor (*)(Tensor::DimensionTuple, const size_t)>(&TTTensor::dirac)).staticmethod("dirac")
.def("use_dense_representations", &TTTensor::use_dense_representations)
.def("reduce_to_maximal_ranks", &TTTensor::reduce_to_maximal_ranks).staticmethod("reduce_to_maximal_ranks")
// .def("degrees_of_freedom", static_cast<size_t (TTTensor::*)()>(&TTTensor::degrees_of_freedom))
.def("degrees_of_freedom", static_cast<size_t (*)(const std::vector<size_t>&, const std::vector<size_t>&)>(&TTTensor::degrees_of_freedom)).staticmethod("degrees_of_freedom")
.def("chop",
.def("chop",
+[](TTTensor &_this, size_t _pos) {
const auto result = _this.chop(_pos);
return boost::python::make_tuple(result.first, result.second);
}, arg("position"))
// .def("round", static_cast<void (TTTensor::*)(const std::vector<size_t>&, double)>(&TTTensor::round),
// (arg("ranks"), arg("epsilon")=EPSILON)
// )
.def("round", static_cast<void (TTTensor::*)(double)>(&TTTensor::round))
// .def("round", static_cast<void (TTTensor::*)(size_t)>(&TTTensor::round))
.def("soft_threshold", static_cast<void (TTTensor::*)(const double, const bool)>(&TTTensor::soft_threshold),
(arg("tau"), arg("preventZero")=false)
)
.def("soft_threshold", static_cast<void (TTTensor::*)(const std::vector<double>&, const bool)>(&TTTensor::soft_threshold),
(arg("tau"), arg("preventZero")=false)
)
.def("move_core", &TTTensor::move_core,
(arg("position"), arg("keepRank")=false)
)
.def("assume_core_position", &TTTensor::assume_core_position)
.def("canonicalize_left", &TTTensor::canonicalize_left)
.def("canonicalize_right", &TTTensor::canonicalize_right)
@@ -89,11 +90,11 @@ void expose_ttnetwork() {
.def(self += self)
.def(self -= self)
;
def("entrywise_product", static_cast<TTTensor (*)(const TTTensor&, const TTTensor&)>(&entrywise_product));
def("find_largest_entry", static_cast<size_t (*)(const TTTensor&, value_t, value_t)>(&find_largest_entry));
def("dyadic_product", static_cast<TTTensor (*)(const std::vector<TTTensor> &)>(&dyadic_product));
class_<TTOperator, bases<TensorNetwork>>("TTOperator")
.def(init<const Tensor&, optional<value_t, size_t>>())
.def(init<const Tensor&, value_t, TensorNetwork::RankTuple>())
@@ -107,7 +108,7 @@ void expose_ttnetwork() {
.def("ranks", &TTOperator::ranks)
.def("rank", &TTOperator::rank)
// .def("frob_norm", &TTOperator::frob_norm) // NOTE unneccessary because correct call is inherited
.def("random",
.def("random",
+[](std::vector<size_t> _dim, std::vector<size_t> _rank) {
return xerus::TTOperator::random(_dim, _rank);
}).staticmethod("random")
@@ -115,34 +116,34 @@ void expose_ttnetwork() {
.def("kronecker", &TTOperator::kronecker).staticmethod("kronecker")
.def("dirac", static_cast<TTOperator (*)(Tensor::DimensionTuple, const Tensor::MultiIndex&)>(&TTOperator::dirac))
.def("dirac", static_cast<TTOperator (*)(Tensor::DimensionTuple, const size_t)>(&TTOperator::dirac)).staticmethod("dirac")
.def("use_dense_representations", &TTOperator::use_dense_representations)
.def("reduce_to_maximal_ranks", &TTOperator::reduce_to_maximal_ranks).staticmethod("reduce_to_maximal_ranks")
// .def("degrees_of_freedom", static_cast<size_t (TTOperator::*)()>(&TTOperator::degrees_of_freedom))
.def("degrees_of_freedom", static_cast<size_t (*)(const std::vector<size_t>&, const std::vector<size_t>&)>(&TTOperator::degrees_of_freedom)).staticmethod("degrees_of_freedom")
.def("chop",
.def("chop",
+[](TTOperator &_this, size_t _pos) {
const auto result = _this.chop(_pos);
return boost::python::make_tuple(result.first, result.second);
}, arg("position"))
.def("round", static_cast<void (TTOperator::*)(const std::vector<size_t>&, double)>(&TTOperator::round),
(arg("ranks"), arg("epsilon")=EPSILON)
)
.def("round", static_cast<void (TTOperator::*)(double)>(&TTOperator::round))
.def("round", static_cast<void (TTOperator::*)(size_t)>(&TTOperator::round))
.def("soft_threshold", static_cast<void (TTOperator::*)(const double, const bool)>(&TTOperator::soft_threshold),
(arg("tau"), arg("preventZero")=false)
)
.def("soft_threshold", static_cast<void (TTOperator::*)(const std::vector<double>&, const bool)>(&TTOperator::soft_threshold),
(arg("tau"), arg("preventZero")=false)
)
.def("move_core", &TTOperator::move_core,
(arg("position"), arg("keepRank")=false)
)
.def("assume_core_position", &TTOperator::assume_core_position)
.def("canonicalize_left", &TTOperator::canonicalize_left)
.def("canonicalize_right", &TTOperator::canonicalize_right)
@@ -153,10 +154,10 @@ void expose_ttnetwork() {
.def(self * other<value_t>())
.def(other<value_t>() * self)
.def(self / other<value_t>())
// for TTOperator only:
.def("identity", &TTOperator::identity<>).staticmethod("identity")
.def("identity", &TTOperator::identity<>).staticmethod("identity")
.def("transpose", &TTOperator::transpose<>)
;
def("entrywise_product", static_cast<TTOperator (*)(const TTOperator&, const TTOperator&)>(&entrywise_product));
...
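Editor's note: the bindings in this file lean on two idioms worth spelling out. The unary + in front of each capture-free lambda forces conversion to a plain function pointer, which is the form Boost.Python's def() accepts, and .staticmethod(...) re-exports the preceding def as a Python static method. A condensed sketch of both, with a toy Counter class that is not part of xerus:

#include <boost/python.hpp>

struct Counter {
    int n = 0;
};

BOOST_PYTHON_MODULE(static_demo) {
    using namespace boost::python;
    class_<Counter>("Counter")
        .def_readonly("n", &Counter::n)
        // +[] converts the capture-free lambda into int -> Counter as a
        // plain function pointer, the form that def() can digest.
        .def("make", +[](int start) {
            Counter c; c.n = start; return c;
        })
        .staticmethod("make");   // expose the previous def as a static method
}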