#include "misc.h"


// Computes the byte strides of a C-contiguous (row-major) buffer with the given
// dimensions and item size, as required by the Python buffer protocol.
std::vector<size_t> strides_from_dimensions_and_item_size(const std::vector<size_t>& _dimensions, const size_t _item_size) {
	const size_t ndim = _dimensions.size();
	std::vector<size_t> strides(ndim, 0);
	if (ndim > 0) {
		strides[ndim-1] = _item_size;
		for (size_t i=0; i<ndim-1; ++i) {
			size_t rev_i = ndim-1-i;
			strides[rev_i-1] = _dimensions[rev_i] * strides[rev_i];
		}
	}
	return strides;
}
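// Worked example (illustration only): for dimensions {2, 3, 4} and
// _item_size == sizeof(double) == 8, the loop yields strides {96, 32, 8},
// i.e. the byte strides of a C-contiguous array as used by numpy.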


void expose_tensor(module& m) {
	enum_<Tensor::Representation>(m, "Representation", "Possible representations of Tensor objects.")
		.value("Dense", Tensor::Representation::Dense)
		.value("Sparse", Tensor::Representation::Sparse)
	;
	enum_<Tensor::Initialisation>(m, "Initialisation", "Possible initialisations of new Tensor objects.")
		.value("Zero", Tensor::Initialisation::Zero)
		.value("None", Tensor::Initialisation::None)
	;

	class_<Tensor>(m, "Tensor", "a non-decomposed Tensor in either sparse or dense representation", buffer_protocol())
	.def_buffer([](Tensor& t) -> buffer_info {
		return buffer_info(
			t.get_dense_data(),                    /* Pointer to buffer */
			sizeof(value_t),                       /* Size of one scalar */
			format_descriptor<value_t>::format(),  /* Python struct-style format descriptor */
			t.order(),                             /* Number of dimensions */
			t.dimensions,                          /* Buffer dimensions */
			strides_from_dimensions_and_item_size(t.dimensions, sizeof(value_t))  /* Strides (in bytes) for each index */
		);
	})
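	// The buffer protocol above lets Python view the dense data without an extra
	// copy. A minimal usage sketch (module name `xerus` assumed, not defined here):
	//   import numpy, xerus
	//   A = xerus.Tensor.ones([2, 3])
	//   B = numpy.asarray(A)  # requests the buffer defined in def_buffer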
	.def(pickle(
		[](const Tensor &_self) { // __getstate__
			return bytes(misc::serialize(_self));
		},
		[](bytes _bytes) { // __setstate__
			return misc::deserialize<Tensor>(_bytes);
		}
	))
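	// Pickle sketch: the __getstate__/__setstate__ pair above round-trips through
	// xerus' binary serialization, so pickle.loads(pickle.dumps(t)) rebuilds the Tensor.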
	.def(init<>(), "constructs an empty Tensor")
	.def(init<Tensor>())
	.def(init<TensorNetwork>())
	.def(init<Tensor::DimensionTuple, Tensor::Representation, Tensor::Initialisation>(),
		"constructs a Tensor with the given dimensions",
		arg("dim"),
		arg("repr")=Tensor::Representation::Sparse,
		arg("init")=Tensor::Initialisation::Zero
	)
	.def(init<Tensor::DimensionTuple, std::function<value_t(std::vector<size_t>)>>())
	.def_static("from_function", +[](const Tensor::DimensionTuple& _dim, const std::function<value_t(std::vector<size_t>)> _f){
		LOG(warning, "Deprecation warning: `from_function` is deprecated and will be removed in Xerus v5.0.0. Use the `Tensor` constructor instead.");
		return Tensor(_dim, _f);
	})
	.def_static("from_buffer", +[](buffer& b){
		buffer_info info = b.request();

		if (info.format != format_descriptor<double>::format()) {
			throw std::runtime_error("Incompatible format: expected a double array!");
		}
		if (info.itemsize != sizeof(value_t)) {
			std::ostringstream msg;
			msg << "Incompatible size: " << info.itemsize << " (got) vs " << sizeof(value_t) << " (expected)";
			throw std::runtime_error(msg.str());
		}
		if (info.shape.size() == 1 and info.shape[0] == 0) {
			return Tensor({}, Tensor::Representation::Dense, Tensor::Initialisation::None);
		}

		std::vector<size_t> dims(info.shape.begin(), info.shape.end());
		std::vector<size_t> strides(info.strides.begin(), info.strides.end());
		if (strides != strides_from_dimensions_and_item_size(dims, info.itemsize)) {
			std::ostringstream msg;
			msg << "Incompatible strides: " << strides << " (got) vs " << strides_from_dimensions_and_item_size(dims, info.itemsize) << " (expected). Make sure your buffer is C contiguous." << std::endl;
			throw std::runtime_error(msg.str());
		}

		Tensor result(dims, Tensor::Representation::Dense, Tensor::Initialisation::None);
		/* *(result.override_dense_data()) = static_cast<double*>(info.ptr); */
		misc::copy(result.get_unsanitized_dense_data(), static_cast<double*>(info.ptr), result.size);

		return result;
	})
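	// from_buffer usage sketch (Python; names other than `from_buffer` assumed):
	//   T = xerus.Tensor.from_buffer(numpy.arange(6.0).reshape(2, 3))
	// Transposed or otherwise non C-contiguous arrays trigger the strides error
	// above; numpy.ascontiguousarray(arr) produces an acceptable layout.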
	.def("__float__", [](const Tensor &_self){
			if (_self.order() != 0) {
				throw value_error("order must be 0");
			}
			return value_t(_self());
	})
	.def_property_readonly("dimensions", +[](Tensor &_A) {
		return _A.dimensions;
	})
	.def("degree", &Tensor::degree)
	.def("order", &Tensor::order)
	.def_readonly("factor", &Tensor::factor)
	.def_readonly("size", &Tensor::size)
	.def("one_norm", &Tensor::one_norm)
	.def("frob_norm", &Tensor::frob_norm)
	.def_static("random",
		+[](std::vector<size_t> _dim) {
		return xerus::Tensor::random(_dim);
		},
		"Construct a tensor with i.i.d. Gaussian random entries."
		parametersDocstr
		"dim : list or tuple of int\n"
		"n : list or tuple of int, optional\n"
		"    number of non-zero entries",
		arg("dim")
		)
	.def_static("random",
		+[](std::vector<size_t> _dim, size_t _n) {
		return xerus::Tensor::random(_dim, _n);
		},
		arg("dim"), arg("n")
		)
	.def_static("random_orthogonal",
		+[](std::vector<size_t> _dimLhs, std::vector<size_t> _dimRhs) {
		return xerus::Tensor::random_orthogonal(_dimLhs, _dimRhs);
		})
	.def_static("ones", &Tensor::ones,
		 "Constructs a tensor of given dimensions that is equal to 1 everywhere."
		  parametersDocstr "dim : list or tuple of int",
	arg("dim")
	)
	.def_static("identity", &Tensor::identity, 
		"Constructs a Tensor representation of the identity operator with the given dimensions."
		parametersDocstr "dim : list or tuple of int",
arg("dim")
	)
	.def_static("kronecker", &Tensor::kronecker, 
		"Constructs a Tensor representation of the kronecker delta (=1 where all indices are identical, =0 otherwise)."
		parametersDocstr "dim : list or tuple of int",
arg("dim")
	)
	.def_static("dirac", static_cast<Tensor (*)(Tensor::DimensionTuple, const Tensor::MultiIndex&)>(&Tensor::dirac),
		"Construct a Tensor with a single entry equals one and all other zero."
		parametersDocstr
		"dim : list or tuple of int\n"
		"pos : list or tuple of int\n"
		"    position of the 1 entry",
		arg("dim"), arg("pos")
	)
	.def_static("dirac", static_cast<Tensor (*)(Tensor::DimensionTuple, const size_t)>(&Tensor::dirac))
	.def("has_factor", &Tensor::has_factor)
	.def("is_dense", &Tensor::is_dense)
	.def("is_sparse", &Tensor::is_sparse)
	.def("sparsity", &Tensor::sparsity)
	.def("all_entries_valid", &Tensor::all_entries_valid)
	.def("reorder_cost", &Tensor::reorder_cost)
	.def("reinterpret_dimensions", &Tensor::reinterpret_dimensions,
		arg("dim"),
		"Reinterprets the dimensions of the tensor."
		parametersDocstr
		"dim : list or tuple of int"
	)
	.def("resize_mode", &Tensor::resize_mode,
		"Resizes a specific mode of the Tensor."
		parametersDocstr
		"mode : int\n"
		"newDim : int\n"
		"cutPos : int, optional (default: infinity)\n"
		"    The position within the selected mode in front of which slates are inserted or removed.",
		arg("mode"), arg("newDim"), arg("cutPos")=~0ul
	)
	.def("fix_mode", &Tensor::fix_mode,
		"Fixes a specific mode to a specific value, effectively reducing the order by one."
		parametersDocstr
		"mode : int\n"
		"value : int",
		arg("mode"), arg("value")
	)
	.def("remove_slate", &Tensor::remove_slate,
		"Removes a single slate from the Tensor, reducing dimension[mode] by one."
		parametersDocstr
		"mode : int\n"
		"pos : int",
		arg("mode"), arg("pos")
	)
	.def("perform_trace", &Tensor::perform_trace)
	.def("offset_add", &Tensor::offset_add)
	.def("use_dense_representation", &Tensor::use_dense_representation)
	.def("use_sparse_representation", &Tensor::use_sparse_representation,
		arg("epsilon")=EPSILON
	)
	.def("sparse_copy", &Tensor::sparse_copy)
	.def("dense_copy", &Tensor::dense_copy)
	.def("ensure_own_data", &Tensor::ensure_own_data)
	.def("ensure_own_data_no_copy", &Tensor::ensure_own_data_no_copy)
	.def("apply_factor", &Tensor::apply_factor)
	.def("ensure_own_data_and_apply_factor", &Tensor::ensure_own_data_and_apply_factor)
	.def_static("multiIndex_to_position", &Tensor::multiIndex_to_position)
	.def_static("position_to_multiIndex", &Tensor::position_to_multiIndex)
	/* .def("__call__", +[](Tensor *_this, const std::vector<Index> &_idx){ */
	/*     return  new xerus::internal::IndexedTensor<Tensor>(std::move((*_this)(_idx))); */
	/* }, keep_alive<0,1>(), return_value_policy::take_ownership ) */
	.def("__call__", +[](Tensor& _this, args _args){
		std::vector<Index> idx;
		idx.reserve(_args.size());
		for (size_t i=0; i<_args.size(); ++i) {
			idx.push_back(*(_args[i].cast<Index *>()));
		}
		return new xerus::internal::IndexedTensor<Tensor>(std::move(_this(idx)));
	}, keep_alive<0,1>(), return_value_policy::take_ownership )
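	// __call__ builds an indexed expression from Index arguments. Python sketch
	// (the `indices` helper is assumed from the accompanying bindings):
	//   i, j, k = xerus.indices(3)
	//   C(i, k) << A(i, j) * B(j, k)   # contraction over j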
	.def("__str__", &Tensor::to_string)
	.def(self * value_t())
	.def(value_t() * self)
	.def(self / value_t())
	.def(self + self)
	.def(self - self)
	.def(self += self)
	.def(self -= self)
	.def("__getitem__", +[](Tensor &_this, size_t _i) {
		if (_i >= _this.size) {
			throw index_error("Index out of range");
		}
		return _this[_i];
	})
	.def("__getitem__", +[](Tensor &_this, std::vector<size_t> _idx) {
		return _this[_idx];
	})
	.def("__setitem__", +[](Tensor &_this, size_t _i, value_t _val) {
		_this[_i] = _val;
	})
	.def("__setitem__", +[](Tensor &_this, std::vector<size_t> _i, value_t _val) {
		_this[_i] = _val;
	})
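	// Item access sketch: the overloads above accept either a flat position or a
	// multi-index, e.g. t[3] = 1.0 and t[[0, 1]] = 2.0 for an order-2 Tensor.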
	// .def("__float__", [](const Tensor &_self){ return value_t(_self); })  //TODO: does not work! use implicitly_convertible<Tensor, internal::IndexedTensorReadOnly<TensorNetwork>>();
	;
}