Commit 914c755a authored by Philipp Trunschke

bugfixes in python wrapper

parent f9223e2c
Pipeline #2005 failed with stages in 16 minutes and 47 seconds
@@ -48,7 +48,7 @@ void expose_htnetwork(module& m) {
 	.def_readonly("corePosition", &HTTensor::corePosition)
 	.def("ranks", &HTTensor::ranks)
 	.def("rank", &HTTensor::rank)
-	// .def("frob_norm", &TTTensor::frob_norm) // NOTE unnecessary because correct call is inherited
+	//.def("frob_norm", &TTTensor::frob_norm) // NOTE unnecessary because correct call is inherited
 	.def_static("random",
 		+[](std::vector<size_t> _dim, std::vector<size_t> _rank) {
 			return xerus::HTTensor::random(_dim, _rank);
@@ -60,12 +60,7 @@ void expose_htnetwork(module& m) {
 	//.def("use_dense_representations", &TTTensor::use_dense_representations)
 	//.def_static("reduce_to_maximal_ranks", &TTTensor::reduce_to_maximal_ranks)
-	//// .def("degrees_of_freedom", static_cast<size_t (TTTensor::*)()>(&TTTensor::degrees_of_freedom))
+	//.def("degrees_of_freedom", static_cast<size_t (TTTensor::*)()>(&TTTensor::degrees_of_freedom))
-	//.def_static("degrees_of_freedom", static_cast<size_t (*)(const std::vector<size_t>&, const std::vector<size_t>&)>(&TTTensor::degrees_of_freedom))
-	// +[](HTTensor &_this, size_t _pos) {
-	//     const auto result = _this.chop(_pos);
-	//     return boost::python::make_tuple(result.first, result.second);
-	// }, arg("position"))
 	.def("round", static_cast<void (HTTensor::*)(const std::vector<size_t>&, double)>(&HTTensor::round),
 		arg("ranks"), arg("epsilon")=EPSILON
@@ -109,7 +104,7 @@ void expose_htnetwork(module& m) {
 	.def_readonly("corePosition", &HTOperator::corePosition)
 	.def("ranks", &HTOperator::ranks)
 	.def("rank", &HTOperator::rank)
-	// .def("frob_norm", &TTOperator::frob_norm) // NOTE unnecessary because correct call is inherited
+	//.def("frob_norm", &TTOperator::frob_norm) // NOTE unnecessary because correct call is inherited
 	.def_static("random",
 		+[](std::vector<size_t> _dim, std::vector<size_t> _rank) {
 			return xerus::HTOperator::random(_dim, _rank);
@@ -121,14 +116,10 @@ void expose_htnetwork(module& m) {
 	//.def("use_dense_representations", &TTOperator::use_dense_representations)
 	//.def("reduce_to_maximal_ranks", &TTOperator::reduce_to_maximal_ranks).staticmethod("reduce_to_maximal_ranks")
-	// .def("degrees_of_freedom", static_cast<size_t (TTOperator::*)()>(&TTOperator::degrees_of_freedom))
+	//.def("degrees_of_freedom", static_cast<size_t (TTOperator::*)()>(&TTOperator::degrees_of_freedom))
 	//.def("degrees_of_freedom", static_cast<size_t (*)(const std::vector<size_t>&, const std::vector<size_t>&)>(&TTOperator::degrees_of_freedom)).staticmethod("degrees_of_freedom")
-	// .def("chop",
-	// +[](HTOperator &_this, size_t _pos) {
-	//     const auto result = _this.chop(_pos);
-	//     return boost::python::make_tuple(result.first, result.second);
-	// }, arg("position"))
-	//
+	//.def("chop", ...)
 	.def("round", static_cast<void (HTOperator::*)(const std::vector<size_t>&, double)>(&HTOperator::round),
 		arg("ranks"), arg("epsilon")=EPSILON
 	)
...
#include "misc.h" #include "misc.h"
#include<pybind11/numpy.h>
std::vector<size_t> strides_from_dimensions_and_item_size(const std::vector<size_t>& _dimensions, const size_t _item_size) { std::vector<size_t> strides_from_dimensions_and_item_size(const std::vector<size_t>& _dimensions, const size_t _item_size) {
...@@ -6,14 +7,30 @@ std::vector<size_t> strides_from_dimensions_and_item_size(const std::vector<size ...@@ -6,14 +7,30 @@ std::vector<size_t> strides_from_dimensions_and_item_size(const std::vector<size
std::vector<size_t> strides(ndim, 0); std::vector<size_t> strides(ndim, 0);
if (ndim > 0) { if (ndim > 0) {
strides[ndim-1] = _item_size; strides[ndim-1] = _item_size;
for (size_t i=0; i<ndim-1; ++i) { for (size_t i=ndim-1; i>0; --i) {
size_t rev_i = ndim-1-i; strides[i-1] = _dimensions[i] * strides[i];
strides[rev_i-1] = _dimensions[rev_i] * strides[rev_i];
} }
} }
return strides; return strides;
} }
+Tensor Tensor_from_buffer(buffer& _b) {
+	// cast buffer into c_contiguous array (removes boilerplate code)
+	auto b = array_t<value_t, array::c_style | array::forcecast>::ensure(_b);
+	buffer_info info = b.request();
+	if (info.shape.size() == 1 and info.shape[0] == 0) {
+		return Tensor({}, Tensor::Representation::Dense, Tensor::Initialisation::None);
+	}
+	std::vector<size_t> dims(info.shape.begin(), info.shape.end());
+	std::vector<size_t> strides(info.strides.begin(), info.strides.end());
+	Tensor result(dims, Tensor::Representation::Dense, Tensor::Initialisation::None);
+	misc::copy(result.get_unsanitized_dense_data(), static_cast<double*>(info.ptr), result.size);
+	return result;
+}
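For context: strides_from_dimensions_and_item_size computes C-contiguous (row-major) byte strides, and the new Tensor_from_buffer helper relies on forcecast to copy any NumPy input into exactly that layout, which is why the old format/stride checks could be dropped. A minimal sketch of the intended Python-side behaviour (the `import xerus as xe` module name is the one used in the comments below; this snippet is not part of the commit):

    import numpy as np
    import xerus as xe

    # Row-major strides follow the fixed loop above: strides[i-1] = dimensions[i] * strides[i].
    a = np.arange(24, dtype=np.float64).reshape(2, 3, 4)
    assert a.strides == (96, 32, 8)  # bytes: (3*4*8, 4*8, 8)

    t = xe.Tensor.from_buffer(a)      # C-contiguous double array: copied directly
    t2 = xe.Tensor.from_buffer(a.T)   # non-contiguous view: forcecast copies it first
    t3 = xe.Tensor.from_buffer(np.arange(6).reshape(2, 3))  # int64 input: forcecast converts to double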
 void expose_tensor(module& m) {
 	enum_<Tensor::Representation>(m, "Representation", "Possible representations of Tensor objects.")
@@ -22,7 +39,7 @@ void expose_tensor(module& m) {
 	;
 	enum_<Tensor::Initialisation>(m, "Initialisation", "Possible initialisations of new Tensor objects.")
 		.value("Zero", Tensor::Initialisation::Zero)
-		.value("None", Tensor::Initialisation::None)
+		.value("Uninitialized", Tensor::Initialisation::None) /* None is a reserved keyword in Python */
 	;
 	class_<Tensor>(m, "Tensor", "a non-decomposed Tensor in either sparse or dense representation", buffer_protocol())
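The rename matters because `None` is a keyword in Python and the old member was effectively unreachable; the new member still maps to Tensor::Initialisation::None on the C++ side. A hedged sketch of the Python view (the enums are registered on the module itself, per the bindings above):

    import xerus as xe

    # hypothetical usage, assuming the module-level enum registration above
    print(xe.Initialisation.Zero)
    print(xe.Initialisation.Uninitialized)  # formerly bound as the unusable name `None`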
@@ -58,41 +75,7 @@ void expose_tensor(module& m) {
 			LOG(warning, "Deprecation warning: `from_function` is deprecated and will be removed in Xerus v5.0.0. Use the `Tensor` constructor instead.");
 			return Tensor(_dim, _f);
 		})
.def_static("from_buffer", +[](buffer& b){ .def_static("from_buffer", &Tensor_from_buffer)
buffer_info info = b.request();
if (info.format != format_descriptor<double>::format()) {
throw std::runtime_error("Incompatible format: expected a double array!");
}
if (info.itemsize != sizeof(value_t)) {
std::ostringstream msg;
msg << "Incompatible size: " << info.itemsize << " (got) vs " << sizeof(value_t) << " (expected)";
throw std::runtime_error(msg.str());
}
if (info.shape.size() == 1 and info.shape[0] == 0) {
return Tensor({}, Tensor::Representation::Dense, Tensor::Initialisation::None);
}
std::vector<size_t> dims(info.shape.begin(), info.shape.end());
std::vector<size_t> strides(info.strides.begin(), info.strides.end());
if (strides != strides_from_dimensions_and_item_size(dims, info.itemsize)) {
std::ostringstream msg;
msg << "Incompatible strides: " << strides << " (got) vs " << strides_from_dimensions_and_item_size(dims, info.itemsize) << " (expected). Make sure your buffer is C contiguous." << std::endl;
throw std::runtime_error(msg.str());
}
Tensor result(dims, Tensor::Representation::Dense, Tensor::Initialisation::None);
/* *(result.override_dense_data()) = static_cast<double*>(info.ptr); */
misc::copy(result.get_unsanitized_dense_data(), static_cast<double*>(info.ptr), result.size);
return result;
})
.def("__float__", [](const Tensor &_self){
if (_self.order() != 0) {
throw value_error("order must be 0");
}
return value_t(_self());
})
.def_property_readonly("dimensions", +[](Tensor &_A) { .def_property_readonly("dimensions", +[](Tensor &_A) {
return _A.dimensions; return _A.dimensions;
}) })
...@@ -208,13 +191,25 @@ arg("dim") ...@@ -208,13 +191,25 @@ arg("dim")
return new xerus::internal::IndexedTensor<Tensor>(std::move(_this(idx))); return new xerus::internal::IndexedTensor<Tensor>(std::move(_this(idx)));
}, keep_alive<0,1>(), return_value_policy::take_ownership ) }, keep_alive<0,1>(), return_value_policy::take_ownership )
.def("__str__", &Tensor::to_string) .def("__str__", &Tensor::to_string)
-		.def(self * value_t())
-		.def(value_t() * self)
-		.def(self / value_t())
+		/* .def(-self) */
+		.def("__neg__",
+			+[](Tensor& _self) {
+				return (-1)*_self;
+			})
 		.def(self + self)
 		.def(self - self)
 		.def(self += self)
 		.def(self -= self)
+		.def(self * value_t())
+		.def(value_t() * self)
+		.def(self *= value_t())
+		.def(self / value_t())
+		/* .def(self /= self) */
+		.def("__itruediv__",
+			+[](Tensor& _self, const value_t _other) {
+				return (_self *= (1/_other));
+			})
.def("__getitem__", +[](Tensor &_this, size_t _i) { .def("__getitem__", +[](Tensor &_this, size_t _i) {
if (_i >= _this.size) { if (_i >= _this.size) {
throw index_error("Index out of range"); throw index_error("Index out of range");
...@@ -230,6 +225,65 @@ arg("dim") ...@@ -230,6 +225,65 @@ arg("dim")
.def("__setitem__", +[](Tensor &_this, std::vector<size_t> _i, value_t _val) { .def("__setitem__", +[](Tensor &_this, std::vector<size_t> _i, value_t _val) {
_this[_i] = _val; _this[_i] = _val;
}) })
// .def("__float__", [](const Tensor &_self){ return value_t(_self); }) //TODO: does not work! use implicitly_convertible<Tensor, internal::IndexedTensorReadOnly<TensorNetwork>>(); .def("__float__", [](const Tensor &_self){
if (_self.order() != 0) {
throw value_error("order must be 0");
}
return value_t(_self());
})
 	;
 }
+// NOTE The following code (when defined globally) would cast every xerus::Tensor to a numpy.ndarray.
+// This would allow for cleaner code like the following:
+//     tt = xe.TTTensor([3])
+//     tt.set_component(0, np.arange(3)[None,:,None])
+/* namespace pybind11 { namespace detail { */
+/*     template <> struct type_caster<Tensor> */
+/*     { */
+/*     public: */
+/*         PYBIND11_TYPE_CASTER(Tensor, _("Tensor")); */
+/*         // Conversion part 1 (Python -> C++) */
+/*         bool load(handle src, bool convert) */
+/*         { */
+/*             if ( !convert and !array_t<value_t>::check_(src) ) */
+/*                 return false; */
+/*             auto buf = array_t<value_t, array::c_style | array::forcecast>::ensure(src); */
+/*             if ( !buf ) */
+/*                 return false; */
+/*             try { */
+/*                 value = Tensor_from_buffer(buf); */
+/*             } catch (const std::runtime_error&) { */
+/*                 return false; */
+/*             } */
+/*             return true; */
+/*         } */
+/*         // Conversion part 2 (C++ -> Python) */
+/*         static handle cast(const Tensor& src, return_value_policy policy, handle parent) */
+/*         { */
+/*             std::cerr << "cast Tensor -> array" << std::endl; */
+/*             std::cerr << "  create dimension vector" << std::endl; */
+/*             std::vector<size_t> shape = src.dimensions; */
+/*             std::cerr << "  create strides vector" << std::endl; */
+/*             std::vector<size_t> strides = strides_from_dimensions_and_item_size(shape, sizeof(value_t)); */
+/*             /1* array a(std::move(shape), std::move(strides), src.get_dense_data()); *1/ */
+/*             /1* return a.release(); *1/ */
+/*             if (src.is_dense()) { */
+/*                 std::cerr << "  is_dense" << std::endl; */
+/*                 array a(std::move(shape), std::move(strides), src.get_unsanitized_dense_data()); */
+/*                 return a.release(); */
+/*             } else { */
+/*                 std::cerr << "  is_sparse" << std::endl; */
+/*                 Tensor tmp(src); */
+/*                 array a(std::move(shape), std::move(strides), tmp.get_dense_data()); */
+/*                 return a.release(); */
+/*             } */
+/*         } */
+/*     }; */
+/* }} // namespace pybind11::detail */
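Taken together, the relocated __float__ and the new __neg__/__itruediv__ lambdas give Tensor the usual Python numeric protocol. A small hypothetical session, assuming only the bindings shown above:

    import numpy as np
    import xerus as xe

    t = xe.Tensor.from_buffer(np.ones((2, 2)))
    u = -t        # __neg__, implemented above as (-1)*t
    u /= 3.0      # __itruediv__, implemented above as u *= (1/3.0)

    s = xe.Tensor.from_buffer(np.array(2.5))  # 0-d array gives an order-0 tensor
    assert float(s) == 2.5                    # __float__ requires order 0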
@@ -10,6 +10,7 @@ void expose_ttnetwork(module& m) {
 			return misc::deserialize<TTTensor>(_bytes);
 		}
 	))
+	.def(init<>(), "constructs an empty TTTensor")
 	.def(init<const TTTensor &>())
 	.def(init<const Tensor&>())
 	.def(init<const Tensor&, value_t>())
@@ -23,7 +24,7 @@ void expose_ttnetwork(module& m) {
 	.def_readonly("corePosition", &TTTensor::corePosition)
 	.def("ranks", &TTTensor::ranks)
 	.def("rank", &TTTensor::rank)
-	// .def("frob_norm", &TTTensor::frob_norm) // NOTE unnecessary because correct call is inherited
+	/* .def("frob_norm", &TTTensor::frob_norm) // NOTE unnecessary because correct call is inherited */
 	.def_static("random",
 		+[](std::vector<size_t> _dim, std::vector<size_t> _rank) {
 			return xerus::TTTensor::random(_dim, _rank);
@@ -36,13 +37,9 @@ void expose_ttnetwork(module& m) {
 	.def("use_dense_representations", &TTTensor::use_dense_representations)
 	.def_static("reduce_to_maximal_ranks", &TTTensor::reduce_to_maximal_ranks)
-	// .def("degrees_of_freedom", static_cast<size_t (TTTensor::*)() const>(&TTTensor::degrees_of_freedom)) // NOTE overloading a method with both static and instance methods is not supported
+	/* .def("degrees_of_freedom", static_cast<size_t (TTTensor::*)() const>(&TTTensor::degrees_of_freedom)) // NOTE overloading a method with both static and instance methods is not supported */
 	.def_static("degrees_of_freedom", static_cast<size_t (*)(const std::vector<size_t>&, const std::vector<size_t>&)>(&TTTensor::degrees_of_freedom))
 	.def("chop", &TTTensor::chop, arg("position"))
-	/* +[](TTTensor &_this, size_t _pos) { */
-	/*     const auto result = _this.chop(_pos); */
-	/*     return boost::python::make_tuple(result.first, result.second); */
-	/* }, arg("position")) */
 	.def("round", static_cast<void (TTTensor::*)(const std::vector<size_t>&, double)>(&TTTensor::round),
 		arg("ranks"), arg("epsilon")=EPSILON
@@ -57,13 +54,24 @@ void expose_ttnetwork(module& m) {
 	.def("assume_core_position", &TTTensor::assume_core_position)
 	.def("canonicalize_left", &TTTensor::canonicalize_left)
 	.def("canonicalize_right", &TTTensor::canonicalize_right)
+	/* .def(-self) */
+	.def("__neg__",
+		+[](TTTensor& _self) {
+			return (-1)*_self;
+		})
 	.def(self + self)
 	.def(self - self)
+	.def(self += self)
+	.def(self -= self)
 	.def(self * value_t())
 	.def(value_t() * self)
+	.def(self *= value_t())
 	.def(self / value_t())
-	.def(self += self)
-	.def(self -= self)
+	/* .def(self /= self) */
+	.def("__itruediv__",
+		+[](TTTensor& _self, value_t _other) {
+			return (_self *= (1/_other));
+		})
 	;
 	m.def("entrywise_product", static_cast<TTTensor (*)(const TTTensor&, const TTTensor&)>(&entrywise_product));
@@ -84,7 +92,7 @@ void expose_ttnetwork(module& m) {
 	.def_readonly("corePosition", &TTOperator::corePosition)
 	.def("ranks", &TTOperator::ranks)
 	.def("rank", &TTOperator::rank)
-	// .def("frob_norm", &TTOperator::frob_norm) // NOTE unnecessary because correct call is inherited
+	/* .def("frob_norm", &TTOperator::frob_norm) // NOTE unnecessary because correct call is inherited */
 	.def_static("random", //TODO check error throwing: python crashes when an error from xerus is thrown
 		+[](std::vector<size_t> _dim, std::vector<size_t> _rank) {
 			return xerus::TTOperator::random(_dim, _rank);
@@ -97,13 +105,9 @@ void expose_ttnetwork(module& m) {
 	.def("use_dense_representations", &TTOperator::use_dense_representations)
 	.def_static("reduce_to_maximal_ranks", &TTOperator::reduce_to_maximal_ranks)
-	// .def("degrees_of_freedom", static_cast<size_t (TTOperator::*)()>(&TTOperator::degrees_of_freedom))
+	/* .def("degrees_of_freedom", static_cast<size_t (TTOperator::*)()>(&TTOperator::degrees_of_freedom)) */
 	.def_static("degrees_of_freedom", static_cast<size_t (*)(const std::vector<size_t>&, const std::vector<size_t>&)>(&TTOperator::degrees_of_freedom))
 	.def("chop", &TTOperator::chop, arg("position"))
-	/* +[](TTOperator &_this, size_t _pos) { */
-	/*     const auto result = _this.chop(_pos); */
-	/*     return make_tuple(result.first, result.second); */
-	/* }, arg("position")) */
 	.def("round", static_cast<void (TTOperator::*)(const std::vector<size_t>&, double)>(&TTOperator::round), arg("ranks"), arg("epsilon")=EPSILON)
 	.def("round", static_cast<void (TTOperator::*)(double)>(&TTOperator::round))
@@ -116,14 +120,24 @@ void expose_ttnetwork(module& m) {
 	.def("assume_core_position", &TTOperator::assume_core_position)
 	.def("canonicalize_left", &TTOperator::canonicalize_left)
 	.def("canonicalize_right", &TTOperator::canonicalize_right)
+	/* .def(-self) */
+	.def("__neg__",
+		+[](TTOperator& _self) {
+			return (-1)*_self;
+		})
 	.def(self + self)
 	.def(self - self)
 	.def(self += self)
 	.def(self -= self)
 	.def(self * value_t())
 	.def(value_t() * self)
+	.def(self *= value_t())
 	.def(self / value_t())
+	/* .def(self /= self) */
+	.def("__itruediv__",
+		+[](TTOperator& _self, value_t _other) {
+			return (_self *= (1/_other));
+		})
 	// for TTOperator only:
 	.def_static("identity", &TTOperator::identity<>)
...
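The TTOperator operator set mirrors the TTTensor one, and `identity` remains the operator-only factory. A hedged closing example (dimension convention assumed: a square operator over two modes of size 2 takes the dimension vector [2, 2, 2, 2]; frob_norm is the inherited call referenced in the comments above):

    import xerus as xe

    I = xe.TTOperator.identity([2, 2, 2, 2])
    B = I + I
    B /= 2.0                              # __itruediv__ from this commit
    assert (B - I).frob_norm() < 1e-12    # B equals the identity again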