Commit 16c4ceb9 authored by Fuchsi*'s avatar Fuchsi*

split python.cpp into several files

parent 68a62241
Pipeline #729 passed with stages
in 8 minutes and 28 seconds
// Xerus - A General Purpose Tensor Library
// Copyright (C) 2014-2016 Benjamin Huber and Sebastian Wolf.
//
// Xerus is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// Xerus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Xerus. If not, see <http://www.gnu.org/licenses/>.
//
// For further information on Xerus visit https://libXerus.org
// or contact us at contact@libXerus.org.
/**
* @file
* @brief Definition of the python bindings of tensor factorizations.
*/
#include "misc.h"
using namespace internal;
void expose_factorizations() {
	// Base class of all factorisation expression objects. It is exposed only so that
	// python can write e.g. `(u(i,r), s(r,r2), v(r2,j)) << SVD(A(i,j))` — the
	// __rlshift__ operator receives the python sequence of indexed result tensors.
	class_<TensorFactorisation, boost::noncopyable>("TensorFactorisation", boost::python::no_init)
		.def("__rlshift__", +[](TensorFactorisation &_rhs, object &_lhs){
			std::vector<IndexedTensor<Tensor>*> tmp = extract<std::vector<IndexedTensor<Tensor>*>>(_lhs);
			_rhs(tmp);
		})
	;
	// Every concrete factorisation is exposed by the exact same pattern:
	//  * a non-instantiable helper class "<name>_temporary" derived from TensorFactorisation,
	//  * a factory function "<name>" that moves the indexed tensor argument into a newly
	//    allocated factorisation object. The result is owned by python (manage_new_object)
	//    while the argument must outlive the result (with_custodian_and_ward_postcall<0,1>).
	#define XERUS_EXPOSE_FACTORISATION(fact) \
		class_<fact, bases<TensorFactorisation>, boost::noncopyable>(#fact "_temporary", boost::python::no_init); \
		def(#fact, +[](IndexedTensor<Tensor> &_rhs)->TensorFactorisation*{ \
			return new fact(std::move(_rhs)); \
		}, return_value_policy<manage_new_object, /* result is treated as a new object */ \
			with_custodian_and_ward_postcall<0,1>>()) /* but the argument will not be destroyed before the result is destroyed */
	XERUS_EXPOSE_FACTORISATION(SVD);
	XERUS_EXPOSE_FACTORISATION(QR);
	XERUS_EXPOSE_FACTORISATION(RQ);
	XERUS_EXPOSE_FACTORISATION(QC);
	XERUS_EXPOSE_FACTORISATION(CQ);
	#undef XERUS_EXPOSE_FACTORISATION

	// NOTE(review): the two enums below are Tensor properties, not factorisation
	// specific — presumably they landed here during the python.cpp split. Confirm
	// whether they belong in the Tensor bindings instead.
	enum_<Tensor::Representation>("Representation", "Possible representations of Tensor objects.")
		.value("Dense", Tensor::Representation::Dense)
		.value("Sparse", Tensor::Representation::Sparse)
	;
	enum_<Tensor::Initialisation>("Initialisation", "Possible initialisations of new Tensor objects.")
		.value("Zero", Tensor::Initialisation::Zero)
		.value("None", Tensor::Initialisation::None)
	;
}
// Xerus - A General Purpose Tensor Library
// Copyright (C) 2014-2016 Benjamin Huber and Sebastian Wolf.
//
// Xerus is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// Xerus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Xerus. If not, see <http://www.gnu.org/licenses/>.
//
// For further information on Xerus visit https://libXerus.org
// or contact us at contact@libXerus.org.
/**
* @file
* @brief Definition of the Tensor python bindings.
*/
#include "misc.h"
// Exposes the Index class and all indexed-tensor expression types (for both Tensor
// and TensorNetwork) together with their arithmetic operators to python.
void expose_indexedTensors() {
	// --------------------------------------------------------------- index
	class_<Index>("Index",
		"helper class to define objects to be used in indexed expressions"
	)
		.def(init<int64_t>())
		.def("__pow__", &Index::operator^, "i**d changes the index i to span d indices in the current expression")
		.def("__xor__", &Index::operator^, "i^d changes the index i to span d indices in the current expression")
		.def("__div__", &Index::operator/, "i/n changes the index i to span 1/n of all the indices of the current object")
		.def("__and__", &Index::operator&, "i&d changes the index i to span all but d indices of the current object")
		.def("__str__", static_cast<std::string (*)(const Index &)>(&misc::to_string<Index>))
	;
	implicitly_convertible<int64_t, Index>();
	// Pure-python generator yielding n distinct Index objects.
	// NOTE(review): the indentation of the embedded python code below looks mangled
	// in this copy (`yield` is not indented deeper than its `while`) — verify the
	// exact whitespace against the original file; it is preserved unchanged here.
	exec(
		"def indices(n=1):\n"
		" \"\"\"Create n distinct indices.\"\"\"\n"
		" i = 0\n"
		" while i<n:\n"
		" yield Index()\n"
		" i += 1\n"
	, scope().attr("__dict__"));
	VECTOR_TO_PY(Index, "IndexVector");
	// Allow any indexed Tensor expression wherever an indexed TensorNetwork
	// expression is expected.
	implicitly_convertible<internal::IndexedTensorReadOnly<Tensor>, internal::IndexedTensorMoveable<TensorNetwork>>();
	implicitly_convertible<internal::IndexedTensorWritable<Tensor>, internal::IndexedTensorMoveable<TensorNetwork>>();
	implicitly_convertible<internal::IndexedTensorMoveable<Tensor>, internal::IndexedTensorMoveable<TensorNetwork>>();
	implicitly_convertible<internal::IndexedTensor<Tensor>, internal::IndexedTensorMoveable<TensorNetwork>>();
	// NOTE in the following all __mul__ variants are defined for the ReadOnly indexed Tensors, even if they are meant for
	// the moveable indexed tensors. boost will take care of the proper matching that way. if IndexedTensorMoveable
	// defined an __mul__ function on its own it would overwrite all overloaded variants of the readonly indexed tensors
	// and thus lose a lot of functionality.
	// ---------------------------------------------- indexedTensor<TN>
	using namespace internal;
	// Defines one binary operator overload that moves both operands and returns a
	// heap-allocated result owned by python (manage_new_object).
#define ADD_MOVE_AND_RESULT_PTR(name, op, lhs_type, rhs_type, res_type) \
	.def(name, \
		+[](lhs_type &_l, rhs_type &_r) -> res_type* { \
			LOG(pydebug, "python wrapper: " name);\
			return new res_type(std::move(_l) op std::move(_r)); \
		}, return_value_policy<manage_new_object>())
	class_<internal::IndexedTensorReadOnly<TensorNetwork>, boost::noncopyable>("IndexedTensorNetworkReadOnly", no_init)
		ADD_MOVE_AND_RESULT_PTR("__add__", +, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>)
		ADD_MOVE_AND_RESULT_PTR("__add__", +, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<TensorNetwork>)
		ADD_MOVE_AND_RESULT_PTR("__sub__", -, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>)
		ADD_MOVE_AND_RESULT_PTR("__sub__", -, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<TensorNetwork>)
		ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>)
		ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorMoveable<TensorNetwork>, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>)
		ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>)
		ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorMoveable<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>)
		ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<TensorNetwork>)
		.def("__mul__",
			+[](internal::IndexedTensorReadOnly<TensorNetwork> &_l, value_t _r) -> internal::IndexedTensorReadOnly<TensorNetwork>* {
				LOG(pydebug, "mul TN ro * scalar");
				return new internal::IndexedTensorMoveable<TensorNetwork>(std::move(_l) * _r);
			}, return_value_policy<manage_new_object>())
		// BUGFIX: boost::python passes `self` as the FIRST argument of every special
		// method, including __rmul__. The previous signature (value_t, tensor&) could
		// therefore never match a `scalar * tensor` call. Since this product is
		// commutative, taking (self, scalar) yields the identical result.
		.def("__rmul__",
			+[](internal::IndexedTensorReadOnly<TensorNetwork> &_l, value_t _r) -> internal::IndexedTensorReadOnly<TensorNetwork>* {
				LOG(pydebug, "mul TN scalar * ro");
				return new internal::IndexedTensorMoveable<TensorNetwork>(std::move(_l) * _r);
			}, return_value_policy<manage_new_object>())
		.def("__div__",
			+[](internal::IndexedTensorReadOnly<TensorNetwork> &_l, value_t _r) -> internal::IndexedTensorReadOnly<TensorNetwork>* {
				LOG(pydebug, "div TN ro / scalar");
				return new internal::IndexedTensorMoveable<TensorNetwork>(std::move(_l) / _r);
			}, return_value_policy<manage_new_object>())
		.def("frob_norm", static_cast<value_t (*)(const IndexedTensorReadOnly<TensorNetwork> &)>(&frob_norm<TensorNetwork>))
		.def(float_(self)) // cast to double
	;
	class_<internal::IndexedTensorWritable<TensorNetwork>, boost::noncopyable, bases<internal::IndexedTensorReadOnly<TensorNetwork>>>("IndexedTensorNetworkWriteable", no_init)
	;
	class_<internal::IndexedTensorMoveable<TensorNetwork>, boost::noncopyable, bases<internal::IndexedTensorWritable<TensorNetwork>>>("IndexedTensorNetworkMoveable", no_init)
	;
	// Assignment to an indexed network is spelled `lhs << rhs` in python.
	class_<internal::IndexedTensor<TensorNetwork>, boost::noncopyable, bases<internal::IndexedTensorWritable<TensorNetwork>>>("IndexedTensorNetwork", no_init)
		.def("__lshift__",
			+[](internal::IndexedTensor<TensorNetwork> &_lhs, internal::IndexedTensorReadOnly<Tensor> &_rhs) {
				std::move(_lhs) = std::move(_rhs);
			})
		.def("__lshift__",
			+[](internal::IndexedTensor<TensorNetwork> &_lhs, internal::IndexedTensorReadOnly<TensorNetwork> &_rhs) {
				std::move(_lhs) = std::move(_rhs);
			})
	;
	// --------------------------------------------- indexedTensor<Tensor>
	class_<internal::IndexedTensorReadOnly<Tensor>, boost::noncopyable>("IndexedTensorReadOnly", no_init)
		ADD_MOVE_AND_RESULT_PTR("__add__", +, IndexedTensorReadOnly<Tensor>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<Tensor>)
		ADD_MOVE_AND_RESULT_PTR("__sub__", -, IndexedTensorReadOnly<Tensor>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<Tensor>)
		ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorReadOnly<Tensor>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<TensorNetwork>)
		ADD_MOVE_AND_RESULT_PTR("__mul__", *, IndexedTensorReadOnly<Tensor>, IndexedTensorReadOnly<TensorNetwork>, IndexedTensorMoveable<TensorNetwork>)
		ADD_MOVE_AND_RESULT_PTR("__div__", /, IndexedTensorReadOnly<Tensor>, IndexedTensorReadOnly<Tensor>, IndexedTensorMoveable<Tensor>)
		.def("__mul__",
			+[](internal::IndexedTensorReadOnly<Tensor> &_l, value_t _r) -> internal::IndexedTensorReadOnly<Tensor>* {
				LOG(pydebug, "mul ro * scalar");
				return new internal::IndexedTensorMoveable<Tensor>(std::move(_l) * _r);
			}, return_value_policy<manage_new_object>())
		// BUGFIX: see the __rmul__ note above — self must be the first parameter.
		.def("__rmul__",
			+[](internal::IndexedTensorReadOnly<Tensor> &_l, value_t _r) -> internal::IndexedTensorReadOnly<Tensor>* {
				LOG(pydebug, "mul scalar * ro");
				return new internal::IndexedTensorMoveable<Tensor>(std::move(_l) * _r);
			}, return_value_policy<manage_new_object>())
		.def("__div__",
			+[](internal::IndexedTensorReadOnly<Tensor> &_l, value_t _r) -> internal::IndexedTensorReadOnly<Tensor>* {
				LOG(pydebug, "div ro / scalar");
				return new internal::IndexedTensorMoveable<Tensor>(std::move(_l) / _r);
			}, return_value_policy<manage_new_object>())
		.def("frob_norm", static_cast<value_t (*)(const IndexedTensorReadOnly<Tensor> &)>(&frob_norm<Tensor>))
		.def(float_(self)) // cast to double
	;
	class_<internal::IndexedTensorWritable<Tensor>, boost::noncopyable, bases<internal::IndexedTensorReadOnly<Tensor>>>("IndexedTensorWriteable", no_init)
	;
	class_<internal::IndexedTensorMoveable<Tensor>, boost::noncopyable, bases<internal::IndexedTensorWritable<Tensor>>>("IndexedTensorMoveable", no_init)
	;
	class_<internal::IndexedTensor<Tensor>, boost::noncopyable, bases<internal::IndexedTensorWritable<Tensor>>>("IndexedTensor", no_init)
		.def("__lshift__",
			+[](internal::IndexedTensor<Tensor> &_lhs, internal::IndexedTensorReadOnly<Tensor> &_rhs) {
				std::move(_lhs) = std::move(_rhs);
			})
		.def("__lshift__",
			+[](internal::IndexedTensor<Tensor> &_lhs, internal::IndexedTensorReadOnly<TensorNetwork> &_rhs) {
				std::move(_lhs) = std::move(_rhs);
			})
	;
#undef ADD_MOVE_AND_RESULT_PTR // do not leak the helper macro past its last use
	VECTOR_TO_PY(IndexedTensor<Tensor>*, "IndexedTensorList");
}
This diff is collapsed.
// Xerus - A General Purpose Tensor Library
// Copyright (C) 2014-2016 Benjamin Huber and Sebastian Wolf.
//
// Xerus is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// Xerus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Xerus. If not, see <http://www.gnu.org/licenses/>.
//
// For further information on Xerus visit https://libXerus.org
// or contact us at contact@libXerus.org.
/**
* @file
* @brief Definition of the python bindings of save and restore, exceptions etc..
*/
#include "misc.h"
// Rebinds the python callable `_name` (looked up in the current scope) to a wrapper
// that accepts a variable number of positional arguments and forwards them as a
// single tuple, i.e. `f(this, a, b)` calls the original `f(this, (a, b))`.
// `_tmpName` names the helper created inside the generated code (default "new_fn",
// see the declaration in misc.h). The generated patch code is executed in the
// module scope so the rebinding is visible to python users.
void variable_argument_member_to_tuple_wrapper(const std::string &_name, const std::string &_tmpName) {
boost::python::str pyCode;
pyCode += "def patch_call_fn():\n";
// NOTE(review): the indentation inside the generated python code below looks
// inconsistent in this copy (`return` is not indented deeper than its `def`);
// the whitespace was presumably mangled in transit — verify against the original.
pyCode += std::string(" original = ")+_name+"\n";
pyCode += std::string(" def ")+_tmpName+"( this, *args ):\n";
pyCode += " return original( this, args )\n";
pyCode += std::string(" return ")+_tmpName+"\n";
pyCode += _name + " = patch_call_fn()\n";
boost::python::exec(pyCode, scope().attr("__dict__"));
}
using namespace internal;
// Exposes free helper functions (norms, approximate comparison, logging),
// (de)serialisation and the xerus exception translation to python.
void expose_misc() {
// Frobenius norms of plain objects and of indexed expressions.
def("frob_norm", +[](const Tensor& _x){ return _x.frob_norm(); });
def("frob_norm", +[](const TensorNetwork& _x){ return _x.frob_norm(); });
def("frob_norm", static_cast<value_t (*)(const IndexedTensorReadOnly<Tensor>&)>(&frob_norm));
def("frob_norm", static_cast<value_t (*)(const IndexedTensorReadOnly<TensorNetwork>&)>(&frob_norm));
// Three-argument overloads with an explicit tolerance ...
def("approx_equal", static_cast<bool (*)(const TensorNetwork&, const TensorNetwork&, double)>(&approx_equal));
def("approx_equal", static_cast<bool (*)(const Tensor&, const TensorNetwork&, double)>(&approx_equal));
def("approx_equal", static_cast<bool (*)(const TensorNetwork&, const Tensor&, double)>(&approx_equal));
def("approx_equal", static_cast<bool (*)(const Tensor&, const Tensor&, double)>(&approx_equal));
// ... and two-argument overloads using the C++ default tolerance. The explicit
// lambdas are needed because boost::python cannot see C++ default arguments.
def("approx_equal", +[](const Tensor& _l, const Tensor& _r) {
return approx_equal(_l, _r);
});
def("approx_equal", +[](const Tensor& _l, const TensorNetwork& _r) {
return approx_equal(_l, _r);
});
def("approx_equal", +[](const TensorNetwork& _l, const Tensor& _r) {
return approx_equal(_l, _r);
});
def("approx_equal", +[](const TensorNetwork& _l, const TensorNetwork& _r) {
return approx_equal(_l, _r);
});
// Forward a message from python into the xerus logging system (info level).
def("log", +[](std::string _msg){
LOG_SHORT(info, _msg);
});
enum_<misc::FileFormat>("FileFormat")
.value("BINARY", misc::FileFormat::BINARY)
.value("TSV", misc::FileFormat::TSV)
;
// save_to_file overloads for all serialisable xerus types; `format` defaults
// to BINARY on the python side.
def("save_to_file", +[](const Tensor &_obj, const std::string &_filename, misc::FileFormat _format){
misc::save_to_file(_obj, _filename, _format);
}, (arg("object"), arg("filename"), arg("format")=misc::FileFormat::BINARY) );
def("save_to_file", +[](const TensorNetwork &_obj, const std::string &_filename, misc::FileFormat _format){
misc::save_to_file(_obj, _filename, _format);
}, (arg("object"), arg("filename"), arg("format")=misc::FileFormat::BINARY) );
def("save_to_file", +[](const TTTensor &_obj, const std::string &_filename, misc::FileFormat _format){
misc::save_to_file(_obj, _filename, _format);
}, (arg("object"), arg("filename"), arg("format")=misc::FileFormat::BINARY) );
def("save_to_file", +[](const TTOperator &_obj, const std::string &_filename, misc::FileFormat _format){
misc::save_to_file(_obj, _filename, _format);
}, (arg("object"), arg("filename"), arg("format")=misc::FileFormat::BINARY) );
// Loads any of the four supported types; the stored class name is read from the
// file header (second whitespace-separated token) to pick the correct type.
// Returns python None if the file cannot be opened or the class is unknown.
def("load_from_file", +[](std::string _filename){
// determine type stored in the file
std::ifstream in(_filename);
if (!in) {
return object();
}
std::string classname;
in >> classname; // "Xerus"
in >> classname;
in.close();
if (classname == "xerus::Tensor") {
return object(misc::load_from_file<Tensor>(_filename));
}
if (classname == "xerus::TensorNetwork") {
return object(misc::load_from_file<TensorNetwork>(_filename));
}
if (classname == "xerus::TTNetwork<false>") {
return object(misc::load_from_file<TTTensor>(_filename));
}
if (classname == "xerus::TTNetwork<true>") {
return object(misc::load_from_file<TTOperator>(_filename));
}
LOG_SHORT(warning, "unknown class type '" << classname << "' in file '" << _filename << "'");
return object();
});
// identity returns the cpp name to a python object
// def("identity", identity_);
// Test helper: deliberately throws a xerus generic_error with a call stack attached.
def("xethrow", +[](){XERUS_THROW(misc::generic_error() << misc::get_call_stack());});
// translate all exceptions thrown inside xerus to own python exception class
// The exception object and its name are static so they outlive this function;
// the translator lambda captures py_gen_error by reference to the static.
// NOTE(review): `py` is presumably a namespace alias for boost::python declared
// in a header not visible here — confirm.
static char fully_qualified_gen_error_name[] = "xerus.generic_error";
static PyObject* py_gen_error = PyErr_NewException(fully_qualified_gen_error_name, PyExc_Exception, 0);
py::scope().attr("generic_error") = py::handle<>(py::borrowed(py_gen_error));
register_exception_translator<misc::generic_error>([](const misc::generic_error &_e){
LOG(pydebug, "custom exception handler called with " << _e.what());
PyErr_SetString(py_gen_error, _e.what());
});
}
// Xerus - A General Purpose Tensor Library
// Copyright (C) 2014-2016 Benjamin Huber and Sebastian Wolf.
//
// Xerus is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// Xerus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Xerus. If not, see <http://www.gnu.org/licenses/>.
//
// For further information on Xerus visit https://libXerus.org
// or contact us at contact@libXerus.org.
/**
* @file
* @brief Definition of common functions for the python bindings.
*/
#include <boost/function.hpp>
#include <boost/python.hpp>
#include <boost/python/stl_iterator.hpp>
#include <boost/python/call.hpp>
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wuseless-cast"
#pragma GCC diagnostic ignored "-Wold-style-cast"
#pragma GCC diagnostic ignored "-Wcast-qual"
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wpedantic"
#include <numpy/ndarrayobject.h>
#pragma GCC diagnostic pop
#include "xerus.h"
#include "xerus/misc/internal.h"
using namespace boost::python;
using namespace xerus;
// Docstring fragments used to compose numpy-style python documentation strings.
#define parametersDocstr "\n\nParameters\n----------\n"
#define returnsDocstr "\n\nReturns\n-------\n"
// Registers std::vector<type> with python under `name`: an opaque class plus a
// from-python sequence converter and a to-python list converter. The trailing
// void(0) forces a semicolon at the call site.
#define VECTOR_TO_PY(type, name) class_<std::vector<type>, boost::noncopyable>(name, no_init); \
custom_vector_from_seq<type>(); \
to_python_converter<std::vector<type>, custom_vector_to_list<type>>(); void(0)
#include "vectorAndPair.h"
// Rebinds the python callable `_name` so trailing positional arguments are
// collected into a single tuple argument (defined alongside expose_misc).
void variable_argument_member_to_tuple_wrapper(const std::string &_name, const std::string &_tmpName = "new_fn");
// One expose_* function per translation unit created by the python.cpp split;
// all are called from the module init function.
void expose_tensor();
void expose_factorizations();
void expose_indexedTensors();
void expose_tensorNetwork();
void expose_ttnetwork();
void expose_leastSquaresAlgorithms();
void expose_recoveryAlgorithms();
void expose_misc();
This diff is collapsed.
// Xerus - A General Purpose Tensor Library
// Copyright (C) 2014-2016 Benjamin Huber and Sebastian Wolf.
//
// Xerus is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// Xerus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Xerus. If not, see <http://www.gnu.org/licenses/>.
//
// For further information on Xerus visit https://libXerus.org
// or contact us at contact@libXerus.org.
/**
* @file
* @brief Definition of the python bindings of our recovery and completion algorithms.
*/
#include "misc.h"
// Exposes the measurement-set classes (single-point, rank-one, UQ) and the
// reconstruction algorithms (IHT, ADF, uq_adf/uq_mc/uq_avg) to python.
void expose_recoveryAlgorithms() {
// ------------------------------------------------------------- measurements
// Single-point measurements: each entry is a multi-index position plus a value.
// positions/measuredValues are exposed through per-element get/set accessors
// because the underlying std::vectors are not exposed directly.
class_<SinglePointMeasurementSet>("SinglePointMeasurementSet")
.def(init<const SinglePointMeasurementSet&>())
.def("get_position", +[](SinglePointMeasurementSet &_this, size_t _i){
return _this.positions[_i];
})
.def("set_position", +[](SinglePointMeasurementSet &_this, size_t _i, std::vector<size_t> _pos){
_this.positions[_i] = _pos;
})
.def("get_measuredValue", +[](SinglePointMeasurementSet &_this, size_t _i){
return _this.measuredValues[_i];
})
.def("set_measuredValue", +[](SinglePointMeasurementSet &_this, size_t _i, value_t _val){
_this.measuredValues[_i] = _val;
})
.def("add", &SinglePointMeasurementSet::add)
.def("size", &SinglePointMeasurementSet::size)
.def("degree", &SinglePointMeasurementSet::degree)
.def("frob_norm", &SinglePointMeasurementSet::frob_norm)
.def("sort", &SinglePointMeasurementSet::sort, arg("positionsOnly")=false)
// measure() fills measuredValues from a solution object or a python callback.
.def("measure", static_cast<void (SinglePointMeasurementSet::*)(const Tensor &)>(&SinglePointMeasurementSet::measure), arg("solution"))
.def("measure", static_cast<void (SinglePointMeasurementSet::*)(const TensorNetwork &)>(&SinglePointMeasurementSet::measure), arg("solution"))
.def("measure", +[](SinglePointMeasurementSet &_this, PyObject *_f) {
// TODO increase ref count for _f? also decrease it on overwrite?!
_this.measure([&_f](const std::vector<size_t> &pos)->double {
return call<double>(_f, pos);
});
})
// test() returns the relative error of a candidate solution on this set.
.def("test", static_cast<double (SinglePointMeasurementSet::*)(const Tensor &) const>(&SinglePointMeasurementSet::test), arg("solution"))
.def("test", static_cast<double (SinglePointMeasurementSet::*)(const TensorNetwork &) const>(&SinglePointMeasurementSet::test), arg("solution"))
.def("test", +[](SinglePointMeasurementSet &_this, PyObject *_f)->double {
// TODO increase ref count for _f? also decrease it on overwrite?!
return _this.test([&_f](const std::vector<size_t> &pos)->double {
return call<double>(_f, pos);
});
})
// Static factories creating n random measurement positions; the last overload
// evaluates a python callback at every position.
.def("random",static_cast<SinglePointMeasurementSet (*)(size_t, const std::vector<size_t>&)>(&SinglePointMeasurementSet::random))
.def("random",static_cast<SinglePointMeasurementSet (*)(size_t, const Tensor&)>(&SinglePointMeasurementSet::random))
.def("random",static_cast<SinglePointMeasurementSet (*)(size_t, const TensorNetwork&)>(&SinglePointMeasurementSet::random))
.def("random",+[](size_t n, const std::vector<size_t> &dim, PyObject *_f) {
// TODO increase ref count for _f? also decrease it on overwrite?!
return SinglePointMeasurementSet::random(n, dim, [&_f](const std::vector<size_t> &pos)->double {
return call<double>(_f, pos);
});
})
.staticmethod("random")
;
def("IHT", &IHT, (arg("x"), arg("measurements"), arg("perfData")=NoPerfData) );
VECTOR_TO_PY(Tensor, "TensorVector");
// Rank-one measurements: positions are vectors of Tensors (one per mode)
// instead of plain multi-indices; otherwise the interface mirrors
// SinglePointMeasurementSet, with an additional normalize().
class_<RankOneMeasurementSet>("RankOneMeasurementSet")
.def(init<const RankOneMeasurementSet&>())
.def("get_position", +[](RankOneMeasurementSet &_this, size_t _i){
return _this.positions[_i];
})
.def("set_position", +[](RankOneMeasurementSet &_this, size_t _i, std::vector<Tensor> _pos){
_this.positions[_i] = _pos;
})
.def("get_measuredValue", +[](RankOneMeasurementSet &_this, size_t _i){
return _this.measuredValues[_i];
})
.def("set_measuredValue", +[](RankOneMeasurementSet &_this, size_t _i, value_t _val){
_this.measuredValues[_i] = _val;
})
.def("add", &RankOneMeasurementSet::add)
.def("size", &RankOneMeasurementSet::size)
.def("degree", &RankOneMeasurementSet::degree)
.def("frob_norm", &RankOneMeasurementSet::frob_norm)
.def("sort", &RankOneMeasurementSet::sort, arg("positionsOnly")=false)
.def("normalize", &RankOneMeasurementSet::normalize)
.def("measure", static_cast<void (RankOneMeasurementSet::*)(const Tensor &)>(&RankOneMeasurementSet::measure), arg("solution"))
.def("measure", static_cast<void (RankOneMeasurementSet::*)(const TensorNetwork &)>(&RankOneMeasurementSet::measure), arg("solution"))
.def("measure", +[](RankOneMeasurementSet &_this, PyObject *_f) {
// TODO increase ref count for _f? also decrease it on overwrite?!
_this.measure([&_f](const std::vector<Tensor> &pos)->double {
return call<double>(_f, pos);
});
})
.def("test", static_cast<double (RankOneMeasurementSet::*)(const Tensor &) const>(&RankOneMeasurementSet::test), arg("solution"))
.def("test", static_cast<double (RankOneMeasurementSet::*)(const TensorNetwork &) const>(&RankOneMeasurementSet::test), arg("solution"))
.def("test", +[](RankOneMeasurementSet &_this, PyObject *_f)->double {
// TODO increase ref count for _f? also decrease it on overwrite?!
return _this.test([&_f](const std::vector<Tensor> &pos)->double {
return call<double>(_f, pos);
});
})
.def("random",static_cast<RankOneMeasurementSet (*)(size_t, const std::vector<size_t>&)>(&RankOneMeasurementSet::random))
.def("random",static_cast<RankOneMeasurementSet (*)(size_t, const Tensor&)>(&RankOneMeasurementSet::random))
.def("random",static_cast<RankOneMeasurementSet (*)(size_t, const TensorNetwork&)>(&RankOneMeasurementSet::random))
.def("random",+[](size_t n, const std::vector<size_t> &dim, PyObject *_f) {
// TODO increase ref count for _f? also decrease it on overwrite?!
return RankOneMeasurementSet::random(n, dim, [&_f](const std::vector<Tensor> &pos)->double {
return call<double>(_f, pos);
});
})
.staticmethod("random")
;
// ------------------------------------------------------------- ADF
// ADFVariant is a configurable solver object; calling it runs the alternating
// directional fitting on x, optionally with a maxRanks constraint.
class_<ADFVariant>("ADFVariant", init<size_t, double, double>())
.def(init<ADFVariant>())
.def_readwrite("maxIterations", &ADFVariant::maxIterations)
.def_readwrite("targetResidualNorm", &ADFVariant::targetResidualNorm)
.def_readwrite("minimalResidualNormDecrease", &ADFVariant::minimalResidualNormDecrease)
.def("__call__", +[](ADFVariant &_this, TTTensor& _x, const SinglePointMeasurementSet& _meas, PerformanceData& _pd){
return _this(_x, _meas, _pd);
}, (arg("x"), arg("measurements"), arg("perfData")=NoPerfData) )
.def("__call__", +[](ADFVariant &_this, TTTensor& _x, const SinglePointMeasurementSet& _meas, const std::vector<size_t>& _maxRanks, PerformanceData& _pd){
return _this(_x, _meas, _maxRanks, _pd);
}, (arg("x"), arg("measurements"), arg("maxRanks"), arg("perfData")=NoPerfData) )
.def("__call__", +[](ADFVariant &_this, TTTensor& _x, const RankOneMeasurementSet& _meas, PerformanceData& _pd){
return _this(_x, _meas, _pd);
}, (arg("x"), arg("measurements"), arg("perfData")=NoPerfData) )
.def("__call__", +[](ADFVariant &_this, TTTensor& _x, const RankOneMeasurementSet& _meas, const std::vector<size_t>& _maxRanks, PerformanceData& _pd){
return _this(_x, _meas, _maxRanks, _pd);
}, (arg("x"), arg("measurements"), arg("maxRanks"), arg("perfData")=NoPerfData) )
;
// Expose the preconfigured global ADF instance by pointer (no copy, no ownership).
scope().attr("ADF") = object(ptr(&ADF));
// ------------------------------------------------------------- UQ
class_<UQMeasurementSet>("UQMeasurementSet")
.def(init<const UQMeasurementSet&>())
.def("add", &UQMeasurementSet::add)
.def("add_initial", &UQMeasurementSet::add_initial)
;
def("uq_avg", &uq_avg);
VECTOR_TO_PY(std::vector<double>, "DoubleVectorVector");
py_pair<std::vector<std::vector<double>>, std::vector<Tensor>>();
def("uq_mc", &uq_mc);
// NOTE(review): "measurments" (sic) is the python-visible keyword argument name;
// fixing the spelling would break existing python callers — keep as is.
def("uq_adf", +[](const UQMeasurementSet& _measurments, const TTTensor& _guess) {
return uq_adf(_measurments, _guess);
}, ( arg("measurments"), arg("guess")) );
}
This diff is collapsed.
// Xerus - A General Purpose Tensor Library
// Copyright (C) 2014-2016 Benjamin Huber and Sebastian Wolf.
//
// Xerus is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// Xerus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Xerus. If not, see <http://www.gnu.org/licenses/>.
//
// For further information on Xerus visit https://libXerus.org
// or contact us at contact@libXerus.org.
/**
* @file
* @brief Definition of the Tensor Network python bindings.
*/
#include "misc.h"
void expose_tensorNetwork() {
VECTOR_TO_PY(TensorNetwork::Link, "LinkVector");
VECTOR_TO_PY(TensorNetwork::TensorNode, "TensorNetworkNodeVector");
// TODO allow modification of the network
{ scope TN_scope =
class_<TensorNetwork>("TensorNetwork")
.def(init<Tensor>())
.def(init<const TensorNetwork &>())
.add_property("dimensions", +[](TensorNetwork &_A) {
return _A.dimensions;
})
.def("degree", &TensorNetwork::degree)
.def("datasize", &TensorNetwork::datasize)
.add_property("nodes", +[](TensorNetwork &_this){
return _this.nodes;
})
.def("node", +[](TensorNetwork &_this, size_t _i) {
return _this.nodes[_i];
})
.add_property("externalLinks", +[](TensorNetwork &_this){
return _this.externalLinks;
})
.def("__call__", +[](TensorNetwork &_this, const std::vector<Index> &_idx){
return new xerus::internal::IndexedTensor<TensorNetwork>(std::move(_this(_idx)));
}, return_value_policy<manage_new_object, with_custodian_and_ward_postcall<0, 1>>() )
.def(self * other<value_t>())
.def(other<value_t>() * self)
.def(self / other<value_t>())
.def("__getitem__", +[](TensorNetwork &_this, size_t _i) {
if (_i >= misc::product(_this.dimensions)) {
PyErr_SetString(PyExc_IndexError, "Index out of range");
throw_error_already_set();
}
return _this[_i];
})
.def("__getitem__", +[](TensorNetwork &_this, std::vector<size_t> _idx) {
return _this[_idx];
})
// .def("reshuffle_nodes", +[](TensorNetwork &_this, boost::python::object _f) { //TODO
// _this.reshuffle_nodes(_f);
// })
.def("require_valid_network"