tensor.h 45.6 KB
Newer Older
1
// Xerus - A General Purpose Tensor Library
Sebastian Wolf's avatar
Sebastian Wolf committed
2
// Copyright (C) 2014-2016 Benjamin Huber and Sebastian Wolf. 
Baum's avatar
Baum committed
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
// 
// Xerus is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
// 
// Xerus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// 
// You should have received a copy of the GNU Affero General Public License
// along with Xerus. If not, see <http://www.gnu.org/licenses/>.
//
// For further information on Xerus visit https://libXerus.org 
// or contact us at contact@libXerus.org.

20
21
22
23
24
/**
 * @file
 * @brief Header file for the Tensor class.
 */

Baum's avatar
Baum committed
25
26
#pragma once

27
#include <map>
Sebastian Wolf's avatar
Sebastian Wolf committed
28
#include <limits>
29
30
#include <memory>
#include <random>
Sebastian Wolf's avatar
Sebastian Wolf committed
31

32
#include "basic.h"
Sebastian Wolf's avatar
Sebastian Wolf committed
33
#include "misc/containerSupport.h"
34
#include "misc/fileIO.h"
35
#include "misc/random.h"
36

37
#include "indexedTensor.h"
Sebastian Wolf's avatar
Sebastian Wolf committed
38

Baum's avatar
Baum committed
39
namespace xerus {
40
41
42
	//Forward declarations
	class TensorNetwork;
	
43
	// NOTE these two functions are used in the template functions of Tensor so they have to be declared before the class..
Ben Huber's avatar
Ben Huber committed
44
	class Tensor;
45
	
Sebastian Wolf's avatar
Sebastian Wolf committed
46
47
48
49
50
51
52
    /** 
     * @brief Low-level contraction between Tensors.
     * @param _result Output for the result of the contraction.
     * @param _lhs left hand side of the contraction.
     * @param _lhsTrans Flags whether the LHS should be transposed (in the matrifications sense).
     * @param _rhs right hand side of the contraction.
     * @param _rhsTrans Flags whether the RHS should be transposed (in the matrifications sense).
53
     * @param _numModes number of modes that shall be contracted.
Sebastian Wolf's avatar
Sebastian Wolf committed
54
     */
55
56
    void contract(Tensor& _result, const Tensor& _lhs, const bool _lhsTrans, const Tensor& _rhs, const bool _rhsTrans, const size_t _numModes);
    Tensor contract(const Tensor& _lhs, const bool _lhsTrans, const Tensor& _rhs, const bool _rhsTrans, const size_t _numModes);
Sebastian Wolf's avatar
Sebastian Wolf committed
57
58
    
    
Ben Huber's avatar
Ben Huber committed
59
60
61
62
63
64
65
66
67
	
	/**
	 * @brief: Performs a simple reshuffle. Much less powerful than a full evaluate, but more efficient.
	 * @details @a _shuffle shall be a vector that gives for every old index, its new position.
	 */
	void reshuffle(Tensor& _out, const Tensor& _base, const std::vector<size_t>& _shuffle);
	Tensor reshuffle(const Tensor& _base, const std::vector<size_t>& _shuffle);
	
	
68
	/// @brief Class that handles simple (non-decomposed) tensors in a dense or sparse representation.
Sebastian Wolf's avatar
Sebastian Wolf committed
69
	class Tensor final {
70
	public:
71
72
		static size_t sparsityFactor; // NOTE not const so that users can modify this value!
		
73
74
		/*- - - - - - - - - - - - - - - - - - - - - - - - - - Auxiliary types- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
		
Sebastian Wolf's avatar
Sebastian Wolf committed
75
76
77
78
79
		/** 
		 * @brief Flags determining the initialisation of the data of Tensor objects. 
		 * @details None means that no initialisation is performed, i.e. the data can be random.
		 * Zero means that the data is zero initialized.
		 */
80
		enum class Initialisation : bool { None, Zero };
81
		
Sebastian Wolf's avatar
Sebastian Wolf committed
82
83
84
85
86
87
		/** 
		 * @brief Flags indicating the internal representation of the data of Tensor objects. 
		 * @details Dense means that an value_t array of 'size' is used to store each entry individually, 
		 * using row-major order. Sparse means that only the non-zero entries are stored explicitly in a set containing
		 * their value and position.
		 */
88
		enum class Representation : bool { Dense, Sparse };
Sebastian Wolf's avatar
Sebastian Wolf committed
89
90
		
		///@brief: Representation of the dimensions of a Tensor.
91
		typedef std::vector<size_t> DimensionTuple; // NOTE must not be declared as "using.." (internal segfault in gcc4.8.1)
Sebastian Wolf's avatar
Sebastian Wolf committed
92
93
		
		///@brief: Representation of a MultiIndex, i.e. the tuple of positions for each dimension determining a single position in a Tensor.
94
		typedef std::vector<size_t> MultiIndex; // NOTE must not be declared as "using.." (internal segfault in gcc4.8.1)
Sebastian Wolf's avatar
Sebastian Wolf committed
95
		
96
97
		/*- - - - - - - - - - - - - - - - - - - - - - - - - - Member variables - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
		/// @brief Vector containing the individual dimensions of the tensor.
Sebastian Wolf's avatar
Sebastian Wolf committed
98
		DimensionTuple dimensions;
99
100
		
		/// @brief Size of the Tensor -- always equal to the product of the dimensions.
101
102
		size_t size = 1;
		
103
		/// @brief Single value representing a constant scaling factor.
104
105
		value_t factor = 1.0;
		
106
		/// @brief The current representation of the Tensor (i.e Dense or Sparse)
107
		Representation representation = Representation::Sparse;
108
109
		
	private:
110
111
112
113
114
115
116
117
		/** 
		 * @brief Shared pointer to the dense data array, if representation is dense. 
		 * @details The data is stored such that indices increase from right to left (row-major order). 
		 * If the tensor is modified and not sole owner a deep copy is performed.
		 */
		std::shared_ptr<value_t> denseData;
		
		/** 
118
		 * @brief Shared pointer to the a map containing the non-zero entries, if representation is Sparse. 
119
120
121
122
		 * @details The entries are stored in a map which uses the position of each entry assuming row-major ordering as key value.
		 * If the tensor is modified and not sole owner a deep copy is performed.
		 */
		std::shared_ptr<std::map<size_t, value_t>> sparseData;
123
		
124
	public:
125
126
		/*- - - - - - - - - - - - - - - - - - - - - - - - - - Constructors - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
		
127
		/// @brief Constructs an order zero Tensor with the given inital representation
128
		explicit Tensor(const Representation _representation = Representation::Sparse);
129
		
130
		
131
		/// @brief Tensors are default copy constructable.
132
		Tensor( const Tensor&  _other ) = default;
133
134
135
		
		
		/// @brief Tensors are default move constructable.
Benjamin Huber's avatar
Benjamin Huber committed
136
		Tensor(       Tensor&& _other ) noexcept = default;
137
138
		
		/** 
139
		 * @brief: Creates a new tensor with the given dimensions.
140
		 * @param _dimensions the dimensions of the new tensor.
141
142
		 * @param _representation (optional) the initial representation of the tensor.
		 * @param _init (optional) inital data treatment, i.e. whether the tensor is to be zero Initialized.
143
		 */
144
		explicit Tensor(DimensionTuple _dimensions, const Representation _representation = Representation::Sparse, const Initialisation _init = Initialisation::Zero);
145
		
146
		
147
148
149
150
151
		/** 
		 * @brief: Creates a new (dense) tensor with the given dimensions, using a provided data.
		 * @param _dimensions the dimensions of the new tensor.
		 * @param _data inital dense data in row-major order.
		 */
152
		template<XERUS_ADD_MOVE(Vec, DimensionTuple), XERUS_ADD_MOVE(SPtr, std::shared_ptr<value_t>)>
153
		explicit Tensor(Vec&& _dimensions, SPtr&& _data)
154
		: dimensions(std::forward<Vec>(_dimensions)), size(misc::product(dimensions)), representation(Representation::Dense), denseData(std::forward<SPtr>(_data)) { }
155
		
156
		
157
158
159
160
161
		/** 
		 * @brief: Creates a new (dense) tensor with the given dimensions, using a provided data.
		 * @param _dimensions the dimensions of the new tensor.
		 * @param _data inital dense data in row-major order.
		 */
162
163
		explicit Tensor(DimensionTuple _dimensions, std::unique_ptr<value_t[]>&& _data);
		
164
165
166
167
168
169
170
171
		
		/** 
		 * @brief Constructs a Tensor with the given dimensions and uses the given function to assign the values to the entries.
		 * @details In this overload no value is passed to _f, i.e. _f must determine the values of the entries independend of their position,
		 * or keep track of the position itself. _f may assume that it is called for the entries in the order they are stored (i.e. row-major order)
		 * @param _dimensions the future dimensions of the Tensor.
		 * @param _f the function to use to set the entries of the Tensor. 
		 */
172
173
		explicit Tensor(DimensionTuple _dimensions, const std::function<value_t()>& _f);
		
174
175
176
177
178
179
180
		
		/** 
		 * @brief Constructs a Tensor with the given dimensions and uses the given function to assign the values to the entries.
		 * @details In this overload the position of each entry assuming row-major order is passed to  _f.
		 * @param _dimensions the future dimensions of the Tensor.
		 * @param _f the function to use to set the entries of the Tensor. 
		 */
181
182
		explicit Tensor(DimensionTuple _dimensions, const std::function<value_t(const size_t)>& _f);
		
183
184
185
186
187
188
189
		
		/** 
		 * @brief Constructs a Tensor with the given dimensions and uses the given function to assign the values to the entries.
		 * @details In this overload the complete position of each entry is passed to  _f.
		 * @param _dimensions the future dimensions of the Tensor.
		 * @param _f the function to use to set the entries of the Tensor. 
		 */
190
191
		explicit Tensor(DimensionTuple _dimensions, const std::function<value_t(const MultiIndex&)>& _f);
		
192
		
193
194
195
196
197
198
		/** 
		 * @brief Constructs a Tensor with the given dimensions and uses the given function @a _f to create @a _N non zero entries.
		 * @details @a _f is called with the current number of entries present and the number of possible entries (i.e. size). @a _f shall return a pair containg the position
		 * and value of the next entry. @a _f is required not to return a position twice.
		 * @param _dimensions the future dimensions of the Tensor.
		 * @param _N the number of non-zero entries to be created.
199
		 * @param _f the function to be used to create each non zero entry. 
200
		 */
201
		Tensor(DimensionTuple _dimensions, const size_t _N, const std::function<std::pair<size_t, value_t>(size_t, size_t)>& _f);
202
203
204
205
206
207
208
		
		
		/** 
		 * @brief Constructs a dense Tensor with the given dimensions and uses the given random generator and distribution to assign the values to the entries.
		 * @details The entries are assigned in the order they are stored (i.e. row-major order). Each assigned is a seperate call to the random distribution.
		 * @param _dimensions the future dimensions of the Tensor.
		 * @param _rnd the random generator to be used.
Sebastian Wolf's avatar
Sebastian Wolf committed
209
		 * @param _dist the random distribution to be used.
210
		 */
211
212
213
		template<class distribution=std::normal_distribution<value_t>, class generator=std::mt19937_64>
		static Tensor XERUS_warn_unused random(DimensionTuple _dimensions, distribution& _dist=xerus::misc::defaultNormalDistribution, generator& _rnd=xerus::misc::randomEngine) {
			Tensor result(std::move(_dimensions), Representation::Dense, Initialisation::None);
Sebastian Wolf's avatar
Sebastian Wolf committed
214
			value_t* const dataPtr = result.denseData.get();
215
			for(size_t i = 0; i < result.size; ++i) {
Sebastian Wolf's avatar
Sebastian Wolf committed
216
				dataPtr[i] = _dist(_rnd);
217
218
219
220
			}
			return result;
		}
		
221
		
222
223
224
225
		/** 
		 * @brief Constructs a dense Tensor with the given dimensions and uses the given random generator and distribution to assign the values to the entries.
		 * @details See the std::vector variant for details.
		 */
226
227
228
		template<class distribution=std::normal_distribution<value_t>, class generator=std::mt19937_64>
		XERUS_force_inline static Tensor XERUS_warn_unused random(std::initializer_list<size_t>&& _dimensions, distribution& _dist=xerus::misc::defaultNormalDistribution, generator& _rnd=xerus::misc::randomEngine) {
			return Tensor::random(DimensionTuple(std::move(_dimensions)), _dist, _rnd);
229
230
		}
		
231
		
Ben Huber's avatar
Ben Huber committed
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
		/** 
		 * @brief Constructs a dense Tensor whose matrification (w.r.t. the split between @a _dimensions1 and @a _dimensions2) has random orthonormal rows or columns.
		 * @details The result is built by applying random Householder reflections to a random +-1 diagonal
		 * matrix and then reinterpreted to the concatenated dimensions.
		 * @param _dimensions1 the dimensions forming the row space of the matrification.
		 * @param _dimensions2 the dimensions forming the column space of the matrification.
		 * @param _rnd the random generator to be used.
		 */
		template<class generator=std::mt19937_64>
		static Tensor XERUS_warn_unused random_orthogonal(DimensionTuple _dimensions1, DimensionTuple _dimensions2, generator& _rnd=xerus::misc::randomEngine) {
			std::vector<size_t> dimensions = _dimensions1;
			dimensions.insert(dimensions.end(), _dimensions2.begin(), _dimensions2.end());
			const size_t m = misc::product(_dimensions1);
			const size_t n = misc::product(_dimensions2);
			const size_t max = std::max(m,n);
			const size_t min = std::min(m,n);
			Tensor result({max,min}, Representation::Sparse, Initialisation::Zero);
			
			// Random +-1 diagonal; one bit of randomness per entry, refreshing the
			// cached generator output every 'restart' bits.
			typename generator::result_type randomness = 0;
			const size_t restart = size_t(std::log2(generator::max()));
			for (size_t i=0; i<min; ++i) {
				const auto idx = i%restart;
				if (idx == 0) {
					randomness = _rnd();
				}
				// NOTE the shifted one must have the generator's result_type: the plain int
				// literal (1<<idx) is undefined behaviour for idx >= 31, which occurs e.g.
				// for std::mt19937_64 where restart is 64.
				if (randomness & (typename generator::result_type(1)<<idx)) {
					result[{i,i}] = 1;
				} else {
					result[{i,i}] = -1;
				}
			}
			
			// Apply min-1 random Householder reflections. The min > 0 check guards the
			// otherwise underflowing min-1 for degenerate (zero-sized) dimensions.
			for (size_t i=0; min > 0 && i<min-1; ++i) { // do k = n-1 to 1 by -1; 
				Tensor u = Tensor::random({max-i}, misc::defaultNormalDistribution, _rnd);
				u[0] -= u.frob_norm();
				u /= u.frob_norm();
				u.apply_factor();
				contract(u, u, false, u, false, 0);
				u *= -2.0;
				Tensor p = Tensor::identity({max,max});
				p.offset_add(u, {i,i});
				contract(result, p, false, result, false, 1);
			}
			
			// The construction yields a (max x min) matrification; transpose if the
			// row space was actually the smaller one.
			if (m != max) {
				reshuffle(result, result, {1,0});
			}
			result.reinterpret_dimensions(std::move(dimensions));
			
			return result;
		}
		
		
284
285
286
287
288
289
290
291
		/** 
		 * @brief Constructs a random sparse Tensor with the given dimensions.
		 * @details The given random generator @a _rnd and distribution @a _dist are used to assign the values to @a _n randomly choosen entries.
		 * @param _dimensions the future dimensions of the Tensor.
		 * @param _N the number of non-zero entries to be created.
		 * @param _rnd the random generator to be used.
		 * @param _dist the random distribution to be used.
		 */
292
293
294
		template<class distribution=std::normal_distribution<value_t>, class generator=std::mt19937_64>
		static Tensor XERUS_warn_unused random(DimensionTuple _dimensions, const size_t _N, distribution& _dist=xerus::misc::defaultNormalDistribution, generator& _rnd=xerus::misc::randomEngine) {
			Tensor result(std::move(_dimensions), Representation::Sparse, Initialisation::Zero);
295
			XERUS_REQUIRE(_N <= result.size, " Cannot create " << _N << " non zero entries in a tensor with only " << result.size << " total entries!");
296
297
298
299
300
301
302
			
			std::uniform_int_distribution<size_t> entryDist(0, result.size-1);
			while(result.sparseData->size() < _N) {
				result.sparseData->emplace(entryDist(_rnd), _dist(_rnd));
			}
			return result;
		}
Sebastian Wolf's avatar
Sebastian Wolf committed
303
		
304
		
305
306
307
308
		/** 
		 * @brief Constructs a random sparse Tensor with the given dimensions.
		 * @details See the std::vector variant for details.
		 */
309
310
		template<class distribution=std::normal_distribution<value_t>, class generator=std::mt19937_64>
		XERUS_force_inline static Tensor XERUS_warn_unused random(std::initializer_list<size_t>&& _dimensions, const size_t _N, distribution& _dist, generator& _rnd) {
Sebastian Wolf's avatar
Sebastian Wolf committed
311
			return Tensor::random(DimensionTuple(_dimensions), _N, _rnd, _dist);
312
313
		}
		
314
		
315
		/** 
Sebastian Wolf's avatar
Sebastian Wolf committed
316
		 * @brief: Returns a Tensor with all entries equal to one.
317
318
		 * @param _dimensions the dimensions of the new tensor.
		 */
319
		static Tensor XERUS_warn_unused ones(DimensionTuple _dimensions);
320
		
321
		
322
		/** 
Sebastian Wolf's avatar
Sebastian Wolf committed
323
		 * @brief: Returns a Tensor representation of the identity operator with the given dimensions.
324
		 * @details That is combining the first half of the dimensions and the second half of the dimensions results in an identity matrix.
325
326
		 * @param _dimensions the dimensions of the new tensor. It is required that _dimensions[i] = _dimensions[d/2+i], otherwise this cannot be the identity operator.
		 */
327
		static Tensor XERUS_warn_unused identity(DimensionTuple _dimensions);
328
		
329
		
330
		/** 
Sebastian Wolf's avatar
Sebastian Wolf committed
331
		 * @brief: Returns a Tensor representation of the kronecker delta.
332
		 * @details That is each entry is one if all indices are equal and zero otherwise. Note iff d=2 this coincides with identity.
333
334
		 * @param _dimensions the dimensions of the new tensor.
		 */
335
		static Tensor XERUS_warn_unused kronecker(DimensionTuple _dimensions);
336
		
337
		
338
		/** 
Sebastian Wolf's avatar
Sebastian Wolf committed
339
		 * @brief: Returns a Tensor with a single entry equal to one and all others zero.
340
		 * @param _dimensions the dimensions of the new tensor.
341
		 * @param _position The position of the one
342
		 */
343
		static Tensor XERUS_warn_unused dirac(DimensionTuple _dimensions, const MultiIndex& _position);
344
		
345
		
346
		/** 
Sebastian Wolf's avatar
Sebastian Wolf committed
347
		 * @brief: Returns a Tensor with a single entry equal to one and all others zero.
348
		 * @param _dimensions the dimensions of the new tensor.
349
		 * @param _position The position of the one
350
		 */
351
		static Tensor XERUS_warn_unused dirac(DimensionTuple _dimensions, const size_t _position);
352
		
353

Sebastian Wolf's avatar
Sebastian Wolf committed
354
355
		/// @brief Returns a copy of this Tensor that uses a dense representation.
		Tensor dense_copy() const;
356
		
357
		
Sebastian Wolf's avatar
Sebastian Wolf committed
358
359
		/// @brief Returns a copy of this Tensor that uses a sparse representation.
		Tensor sparse_copy() const;
360
		
361
362
363
364
365
366
367
368
		/*- - - - - - - - - - - - - - - - - - - - - - - - - - Standard operators - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
		/** 
		 * @brief Standard assignment operator.
		 * @param _other the Tensor to be assinged to this one.
		 * @return a reference to this Tensor.
		 */
		Tensor& operator=(const Tensor&  _other) = default;
		
369
		
370
371
372
373
374
		/** 
		 * @brief Standard move-assignment operator.
		 * @param _other the Tensor to be move-assinged to this one.
		 * @return a reference to this Tensor.
		 */
375
		Tensor& operator=(      Tensor&& _other) = default;
376
377
		
		
378
379
380
381
382
383
384
385
		/** 
		 * @brief Assigns the given TensorNetwork to this Tensor by completely contracting the network.
		 * @param _other the TensorNetwork to be to this Tensor.
		 * @return a reference to this Tensor.
		 */
		Tensor& operator=(const TensorNetwork& _network);
		
		
386
387
		/*- - - - - - - - - - - - - - - - - - - - - - - - - - Information - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
		
Sebastian Wolf's avatar
Sebastian Wolf committed
388
389
390
391
392
393
394
395
396
397
398
399
400
		/** 
		 * @brief Returns the degree of the tensor.
		 * @details The degree is always equals to dimensions.size()
		 * @return the degree of the tensor
		 */
		size_t degree() const;
		
		/** 
		 * @brief Checks whether the tensor has a non-trivial global scaling factor.
		 * @return true if there is a non-trivial factor, false if not.
		 */
		bool has_factor() const;
		
401
		/// @brief Returns whether the current representation is dense.
402
		bool is_dense() const;
403
404
		
		/// @brief Returns whether the current representation is sparse.
405
		bool is_sparse() const;
406
		
407
408
409
410
411
412
413
		/** 
		 * @brief Returns the number currently saved entries. 
		 * @details Note that this is not nessecarily the number of non-zero entries as the saved entries may contain
		 * zeros. Even more if a dense representation is used size is returned. 
		 */
		size_t sparsity() const;
		
Sebastian Wolf's avatar
Sebastian Wolf committed
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
		/** 
		 * @brief Determines the number of non-zero entries.
		 * @param _eps (optional) epsilon detrmining the maximal value, that is still assumed to be zero.
		 * @return the number of non-zero entries found.
		 */
		size_t count_non_zero_entries(const value_t _eps = std::numeric_limits<value_t>::epsilon()) const;
		
		/** 
		 * @brief Checks the tensor for illegal entries, e.g. nan, inf,...
		 * @return TRUE there are no invalid entries, FALSE otherwise.
		 */
		bool all_entries_valid() const;
		
		/** 
		 * @brief Approximates the cost to reorder the tensor.
		 * @return the approximated costs.
		 */
431
		size_t reorder_cost() const;
432
		
Sebastian Wolf's avatar
Sebastian Wolf committed
433
434
435
436
437
		/** 
		 * @brief Calculates the frobenious norm of the tensor.
		 * @return the frobenious norm.
		 */
		value_t frob_norm() const;
438
		
439
440
		
		/*- - - - - - - - - - - - - - - - - - - - - - - - - - Basic arithmetics - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
		/** 
		 * @brief Adds the @a _other Tensor entrywise to this one.
		 * @details To be well-defined it is required that the dimensions of this and @a _other coincide.
		 * @param _other the Tensor to be added to this one.
		 * @return a reference to this Tensor.
		 */
		Tensor& operator+=(const Tensor& _other);
		
		/** 
		 * @brief Subtracts the @a _other Tensor entrywise from this one.
		 * @details To be well-defined it is required that the dimensions of this and @a _other coincide.
		 * @param _other the Tensor to be subtracted to this one.
		 * @return a reference to this Tensor.
		 */
		Tensor& operator-=(const Tensor& _other);
		
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
		/** 
		 * @brief Performs the entrywise multiplication with a constant @a _factor.
		 * @details Internally this only results in a change in the global factor.
		 * @param _factor the factor,
		 * @return a reference to this Tensor.
		 */
		Tensor& operator*=(const value_t _factor);
		
		/** 
		 * @brief Performs the entrywise divison by a constant @a _divisor.
		 * @details Internally this only results in a change in the global factor.
		 * @param _divisor the divisor,
		 * @return a reference to this Tensor.
		 */ 
		Tensor& operator/=(const value_t _divisor);
		
473
		
474
475
476
		/*- - - - - - - - - - - - - - - - - - - - - - - - - - Access - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
		
		/** 
Sebastian Wolf's avatar
Sebastian Wolf committed
477
478
479
480
		 * @brief Read/Write access a single entry.
		 * @param _position the position of the desired entry, assuming row-major ordering.
		 * @return a reference to the selected entry.
		 */
481
		value_t& operator[](const size_t _position);
482
		
Sebastian Wolf's avatar
Sebastian Wolf committed
483
484
485
		/** 
		 * @brief Read access a single entry.
		 * @param _position the position of the desired entry, assuming row-major ordering.
486
		 * @return the value of the selected entry.
Sebastian Wolf's avatar
Sebastian Wolf committed
487
		 */
488
		value_t operator[](const size_t _position) const;
489
		
Sebastian Wolf's avatar
Sebastian Wolf committed
490
491
		/** 
		 * @brief Read/Write access a single entry.
492
		 * @param _positions the positions of the desired entry.
Sebastian Wolf's avatar
Sebastian Wolf committed
493
494
		 * @return a reference to the selected entry.
		 */
Sebastian Wolf's avatar
Sebastian Wolf committed
495
		value_t& operator[](const MultiIndex& _positions);
496
		
Sebastian Wolf's avatar
Sebastian Wolf committed
497
498
		/** 
		 * @brief Read access a single entry.
499
		 * @param _positions the positions of the desired entry.
500
		 * @return the value of the selected entry.
Sebastian Wolf's avatar
Sebastian Wolf committed
501
		 */
Sebastian Wolf's avatar
Sebastian Wolf committed
502
		value_t operator[](const MultiIndex& _positions) const;
503
504
		
		
505
		/** 
506
		 * @brief Unsanitized access to a single entry.
507
508
509
		 * @param _position the position of the desired entry, assuming row-major ordering.
		 * @return the value of the selected entry.
		 */
510
		value_t& at(const size_t _position);
511
512
		
		/** 
513
514
		 * @brief Unsanitized read access to a single entry.
		 * @param _position the position of the desired entry, assuming row-major ordering.
515
516
		 * @return the value of the selected entry.
		 */
517
		value_t cat(const size_t _position) const;
518
		
519
520
521
522
523
524
525
526
527
528
		/** 
		 * @brief Returns a pointer for direct access to the dense data array in row major order. 
		 * @details Also takes care that this direct access is safe, i.e. that this tensor is using a dense representation, is the sole owner of the data and that no non trivial factor exists.
		 * @return pointer to the dense data array.
		 */
		value_t* get_dense_data();
		
		/** 
		 * @brief Gives access to the internal data pointer, without any checks.
		 * @details Note that the dense data array might not exist because a sparse representation is used, may shared with other tensors 
529
		 * or has to be interpreted considering a global factor. Both can be avoided by using get_dense_data(). The tensor data itself is stored in row-major ordering.
530
531
532
533
534
535
536
		 * @return pointer to the internal dense data array.
		 */
		value_t* get_unsanitized_dense_data();
		
		/** 
		 * @brief Gives access to the internal data pointer, without any checks.
		 * @details Note that the dense data array might not exist because a sparse representation is used, may shared with other tensors 
537
		 * or has to be interpreted considering a gloal factor. Both can be avoid if using get_dense_data(). The tensor data itself is stored in row-major ordering.
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
		 * @return pointer to the internal dense data array.
		 */
		const value_t* get_unsanitized_dense_data() const;
		
		/** 
		 * @brief Returns a pointer to the internal dense data array for complete rewrite purpose ONLY.
		 * @details This is equivalent to calling reset() with the current dimensions, dense representation and no initialisation and then
		 * calling get_unsanitized_dense_data().
		 * @return pointer to the internal dense data array.
		 */
		value_t* override_dense_data();
		
		/** 
		 * @brief Gives access to the internal shared data pointer, without any checks.
		 * @details Note that the data array might be shared with other tensors or has to be interpreted considering a global
553
		 * factor. Both can be avoid if using get_dense_data(). The tensor data itself is stored in row-major ordering.
554
555
556
557
		 * @return The internal shared pointer to the data array.
		 */
		const std::shared_ptr<value_t>& get_internal_dense_data();
		
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
		/** 
		 * @brief Returns a reference for direct access to the sparse data map. 
		 * @details Also takes care that this direct access is safe, i.e. that this tensor is using a dense representation, is the sole owner of the data and that no non trivial factor exists.
		 * @return reference to the sparse data map.
		 */
		std::map<size_t, value_t>& get_sparse_data();
		
		/** 
		 * @brief Gives access to the internal sparse map, without any checks.
		 * @details Note that the sparse data map might not exist because no sparse representation is used, 
		 * may shared with other tensors or has to be interpreted considering a gloal factor. Both can be avoid if using get_sparse_data().
		 * @return reference to the internal sparse data map.
		 */
		std::map<size_t, value_t>& get_unsanitized_sparse_data();
		
		/** 
		 * @brief Gives access to the internal sparse map, without any checks.
		 * @details Note that the sparse data map might not exist because no sparse representation is used, 
		 * may shared with other tensors or has to be interpreted considering a gloal factor. Both can be avoid if using get_sparse_data().
		 * @return reference to the internal sparse data map.
		 */
579
		const std::map<size_t, value_t>& get_unsanitized_sparse_data() const;
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
		
		/** 
		 * @brief Returns a pointer to the internal sparse data map for complete rewrite purpose ONLY.
		 * @details This is equivalent to calling reset() with the current dimensions, sparse representation and no initialisation and then
		 * calling get_unsanitized_sparse_data().
		 * @return reference to the internal sparse data map.
		 */
		std::map<size_t, value_t>& override_sparse_data();
		
		/** 
		 * @brief Gives access to the internal shared sparse data pointer, without any checks.
		 * @details Note that the sparse data map might not exist because no sparse representation is used, 
		 * may shared with other tensors or has to be interpreted considering a gloal factor. Both can be avoid if using get_sparse_data().
		 * @return The internal shared pointer to the sparse data map.
		 */
		const std::shared_ptr<std::map<size_t, value_t>>& get_internal_sparse_data();
		
597
		
598
599
600
		/*- - - - - - - - - - - - - - - - - - - - - - - - - - Indexing - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
		
		/** 
601
		 * @brief Indexes the Tensor for read/write use.
Sebastian Wolf's avatar
Sebastian Wolf committed
602
603
604
		 * @param _args several [indices](@ref Index) determining the desired index order.
		 * @return an internal representation of an IndexedTensor.
		 */
605
		template<typename... args>
Sebastian Wolf's avatar
Sebastian Wolf committed
606
607
		internal::IndexedTensor<Tensor> operator()(args... _args) {
			return internal::IndexedTensor<Tensor>(this, std::vector<Index>({_args...}), false);
608
609
610
611
		}
		
		
		/** 
612
		 * @brief Indexes the Tensor for read only use.
Sebastian Wolf's avatar
Sebastian Wolf committed
613
614
615
		 * @param _args several [indices](@ref Index) determining the desired index order.
		 * @return an internal representation of an IndexedTensor.
		 */
616
		template<typename... args>
Sebastian Wolf's avatar
Sebastian Wolf committed
617
618
		internal::IndexedTensorReadOnly<Tensor> operator()(args... _args) const {
			return internal::IndexedTensorReadOnly<Tensor>(this, std::vector<Index>({_args...}));
619
620
621
622
		}
		
		
		/** 
Sebastian Wolf's avatar
Sebastian Wolf committed
623
624
625
626
		 * @brief Indexes the tensor for read/write use.
		 * @param _indices several [indices](@ref Index) determining the desired index order.
		 * @return an internal representation of an IndexedTensor.
		 */
Sebastian Wolf's avatar
Sebastian Wolf committed
627
		internal::IndexedTensor<Tensor> operator()(const std::vector<Index>&  _indices);
628
		
Sebastian Wolf's avatar
Sebastian Wolf committed
629
630
631
632
633
634
		
		/** 
		 * @brief Indexes the tensor for read/write use.
		 * @param _indices several [indices](@ref Index) determining the desired index order.
		 * @return an internal representation of an IndexedTensor.
		 */
Sebastian Wolf's avatar
Sebastian Wolf committed
635
		internal::IndexedTensor<Tensor> operator()(	  std::vector<Index>&& _indices);
Sebastian Wolf's avatar
Sebastian Wolf committed
636
637
638
639
640
641
642

		
		/** 
		 * @brief Indexes the tensor for read only use.
		 * @param _indices several [indices](@ref Index) determining the desired index order.
		 * @return an internal representation of an IndexedTensor.
		 */
Sebastian Wolf's avatar
Sebastian Wolf committed
643
		internal::IndexedTensorReadOnly<Tensor> operator()(const std::vector<Index>&  _indices) const;
644
		
Sebastian Wolf's avatar
Sebastian Wolf committed
645
646
647
648
649
650
		
		/** 
		 * @brief Indexes the tensor for read only use.
		 * @param _indices Several [indices](@ref Index) determining the desired index order.
		 * @return an internal representation of an IndexedTensor.
		 */
Sebastian Wolf's avatar
Sebastian Wolf committed
651
		internal::IndexedTensorReadOnly<Tensor> operator()(	  std::vector<Index>&& _indices) const;
652
		
653
		
654
		
Sebastian Wolf's avatar
Sebastian Wolf committed
655
		/*- - - - - - - - - - - - - - - - - - - - - - - - - - Modifiers - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
656
		
657
658
659
660
661
662
663
		/** 
		 * @brief Resets the tensor to the given dimensions and representation.
		 * @details Leaves the Tensor in the same state as if newly constructed with the the same arguments.
		 * @param _dimensions the dimensions of the new tensor.
		 * @param _representation the new representation of the tensor.
		 * @param _init (optional) data treatment, i.e. whether the tensor shall be zero initialized.
		 */
664
665
		void reset(DimensionTuple _newDim, const Representation _representation, const Initialisation _init = Initialisation::Zero);
		
666
667
668
669
670
671
		
		/** 
		 * @brief Resets the tensor to the given dimensions, preserving the current representation.
		 * @param _dimensions the dimensions of the new tensor.
		 * @param _init (optional) data treatment, i.e. whether the tensor shall be zero initialized.
		 */
672
673
		void reset(DimensionTuple _newDim, const Initialisation _init = Initialisation::Zero);
		
Sebastian Wolf's avatar
Sebastian Wolf committed
674
675
676
677
678
		/** 
		 * @brief Resets the tensor as if default initialized.
		 */
		void reset();
		
679
680
681
682
683
684
		
		/** 
		 * @brief Resets the tensor to the given dimensions and uses the given data.
		 * @param _dimensions the dimensions of the new tensor.
		 * @param _newData new dense data in row-major order.
		 */
685
686
		void reset(DimensionTuple _newDim, const std::shared_ptr<value_t>& _newData);
		
687
688
		
		/** 
689
		 * @brief Resets the tensor to the given dimensions with data @a _newData.
690
691
692
		 * @param _dimensions the dimensions of the new tensor.
		 * @param _newData new dense data in row-major order.
		 */
693
694
		void reset(DimensionTuple _newDim, std::unique_ptr<value_t[]>&& _newData);
		
695
		
696
697
698
699
700
701
		/**
		 * @brief Resets the tensor to a given dimensionstuple with sparse data @a _newData
		 */
		void reset(DimensionTuple _newDim, std::map<size_t, value_t>&& _newData);
		
		
Sebastian Wolf's avatar
Sebastian Wolf committed
702
703
704
		/** 
		 * @brief Reinterprets the dimensions of the tensor.
		 * @details For this simple reinterpretation it is nessecary that the size implied by the new dimensions is the same as to old size 
705
706
		 * (a vector with 16 entries cannot be interpreted as a 10x10 matrix, but it can be interpreted as a 4x4 matrix). If a real change in size is 
		 * required use resize_mode() instead.
Sebastian Wolf's avatar
Sebastian Wolf committed
707
708
		 * @param _newDimensions the dimensions the tensor shall be interpreted to have. 
		 */
709
		void reinterpret_dimensions(DimensionTuple _newDimensions);
710
		
711
		/** 
712
713
714
715
716
		 * @brief Resizes a specific mode of the Tensor.
		 * @details Use this function only if the content of the tensor shall stay, otherwise use reset().
		 * @param _mode the mode to resize.
		 * @param _newDim the new dimension that mode shall have.
		 * @param _cutPos the position within the selected mode in front of which slates are inserted 
717
718
		 * or removed. By default the current dimension, i.e new slates are added after the last current one
		 * and removed starting from the last one.
719
		 */
720
721
		void resize_mode(const size_t _mode, const size_t _newDim, size_t _cutPos=~0ul);
		
722
		
Sebastian Wolf's avatar
Sebastian Wolf committed
723
		/** 
724
725
726
		 * @brief Fixes a specific mode to a specific value, effectively reducing the order by one.
		 * @param _mode the mode in which the slate shall be fixed, e.g. 0 to fix the first mode.
		 * @param _slatePosition the position in the corresponding mode that shall be used. 0 <= _slatePosition < dimension[_mode]
Sebastian Wolf's avatar
Sebastian Wolf committed
727
		 */
728
729
		void fix_mode(const size_t _mode, const size_t _slatePosition);
		
730
		
731
		/** 
732
733
734
		 * @brief Removes a single slate from the Tensor, reducing dimension[_mode] by one.
		 * @param _mode the mode that will be reduced.
		 * @param _pos the index within the selected mode for which the slate shall be removed.
735
		 */
736
		void remove_slate(const size_t _mode, const size_t _pos);
737
		
Sebastian Wolf's avatar
Sebastian Wolf committed
738
739
740
741
742
743
744
745
		
		/** 
		 * @brief Performs the trace over the given indices
		 * @param _firstIndex the first index involved in the trace.
		 * @param _secondIndex the second index involved in the trace.
		 */
		void perform_trace(size_t _firstIndex, size_t _secondIndex);
		
746
		
747
748
749
750
751
		/** 
		 * @brief Modifies the diagonal entries according to the given function.
		 * @details In this overload only the current diagonal entries are passed to @a _f, one at a time. At the moment this is only defined for matricies.
		 * @param _f the function to call to modify each entry.
		 */
752
		void modify_diagonal_entries(const std::function<void(value_t&)>& _f);
753
		
754
		
755
756
757
758
759
		/** 
		 * @brief Modifies the diagonal entries according to the given function.
		 * @details In this overload the current diagonal entries are passed to @a _f, one at a time, together with their position on the diagonal. At the moment this is only defined for matricies.
		 * @param _f the function to call to modify each entry.
		 */
760
		void modify_diagonal_entries(const std::function<void(value_t&, const size_t)>& _f);
761
		
762
		
763
764
765
766
767
		/** 
		 * @brief Modifies every entry according to the given function.
		 * @details In this overload only the current entry is passed to @a _f.
		 * @param _f the function to call to modify each entry.
		 */
768
		void modify_entries(const std::function<void(value_t&)>& _f);
769
		
770
		
771
772
773
774
775
		/** 
		 * @brief Modifies every entry according to the given function.
		 * @details In this overload the current entry together with its position, assuming row-major ordering is passed to @a _f.
		 * @param _f the function to call to modify each entry.
		 */
776
		void modify_entries(const std::function<void(value_t&, const size_t)>& _f);
777
		
778
		
779
780
781
782
783
		/** 
		 * @brief Modifies every entry according to the given function.
		 * @details In this overload the current entry together with its complete position is passed to @a _f.
		 * @param _f the function to call to modify each entry.
		 */
784
		void modify_entries(const std::function<void(value_t&, const MultiIndex&)>& _f);
785
		
786
787
788
789
790
791
		/** 
		 * @brief Adds the given Tensor with the given offsets to this one.
		 * @param _other Tensor that shall be added to this one, the orders must coincide.
		 * @param _offsets the offsets to be used.
		 */
		void offset_add(const Tensor& _other, const std::vector<size_t>& _offsets);
792
		
793
		/** 
		 * @brief Converts the Tensor to a dense representation.
		 */
		void use_dense_representation();
		
		
		/** 
		 * @brief Converts the Tensor to a dense representation if sparsity * sparsityFactor >= size
		 */
		void use_dense_representation_if_desirable();
		
		
		/** 
		 * @brief Converts the Tensor to a sparse representation.
		 * @param _eps (optional) threshold for entries to be kept; presumably entries of absolute value below it are dropped — TODO confirm against implementation.
		 */
		void use_sparse_representation(const value_t _eps = std::numeric_limits<value_t>::epsilon());
		
		
		/*- - - - - - - - - - - - - - - - - - - - - - - - - - Miscellaneous - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
Sebastian Wolf's avatar
Sebastian Wolf committed
811
812
813
		
		/** 
		 * @brief Creates a string representation of the Tensor.
814
		 * @note the mapping is not unique and can thus not be used to recreate the original tensor
815
		 * @return the string representation. 
Sebastian Wolf's avatar
Sebastian Wolf committed
816
		 */
817
		std::string to_string() const;
818
		
Sebastian Wolf's avatar
Sebastian Wolf committed
819
		
820
821
		/*- - - - - - - - - - - - - - - - - - - - - - - - - - Auxiliary functions - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
		
822
		static size_t multiIndex_to_position(const MultiIndex& _multiIndex, const DimensionTuple& _dimensions);
823
		
824
825
		static MultiIndex position_to_multiIndex(size_t _position, const DimensionTuple& _dimensions);
		
826
827
		
		/*- - - - - - - - - - - - - - - - - - - - - - - - - - Internal Helper functions - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
828
	protected:
		
		/// @brief Shared implementation of += and -=; NOTE(review): sign presumably selects addition (+1) or subtraction (-1) of _other — confirm in the definition.
		template<int sign>
		static void plus_minus_equal(Tensor& _me, const Tensor& _other);
		
		/// @brief Adds the given sparse data to the given full data
		static void add_sparse_to_full(const std::shared_ptr<value_t>& _denseData, const value_t _factor, const std::shared_ptr<const std::map<size_t, value_t>>& _sparseData);
		
		/// @brief Adds the given sparse data to the given sparse data
		static void add_sparse_to_sparse(const std::shared_ptr<std::map<size_t, value_t>>& _sum, const value_t _factor, const std::shared_ptr<const std::map<size_t, value_t>>& _summand);
		
	public:
		
		/// @brief Ensures that this tensor is the sole owner of its data. If needed new space is allocated and all entries are copied.
		void ensure_own_data();
		
		/// @brief Ensures that this tensor is the sole owner of its data space. If needed new space is allocated with entries left undefined.
		void ensure_own_data_no_copy();
		
		/// @brief Checks whether there is a non-trivial scaling factor and applies it if necessary.
		void apply_factor();
		
		/// @brief Checks whether there is a non-trivial factor and applies it. Even if no factor is applied ensure_own_data() is called.
		void ensure_own_data_and_apply_factor();
	};
	
853
	/*- - - - - - - - - - - - - - - - - - - - - - - - - - External functions - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
Sebastian Wolf's avatar
Sebastian Wolf committed
854
	
Sebastian Wolf's avatar
Sebastian Wolf committed
855
856
857
858
859
    /** 
     * @brief Low-level contraction between Tensors.
     * @param _result Output for the result of the contraction.
     * @param _lhs left hand side of the contraction.
     * @param _rhs right hand side of the contraction.
860
     * @param _numModes number of indices that shall be contracted.
Sebastian Wolf's avatar
Sebastian Wolf committed
861
     */
862
863
    XERUS_force_inline void contract(Tensor& _result, const Tensor& _lhs,  const Tensor& _rhs, const size_t _numModes) {
        contract(_result, _lhs, false, _rhs, false, _numModes);
Sebastian Wolf's avatar
Sebastian Wolf committed
864
865
    }
    
866
867
    XERUS_force_inline Tensor contract(const Tensor& _lhs, const Tensor& _rhs, const size_t _numModes) {
        return contract(_lhs, false, _rhs, false, _numModes);
Sebastian Wolf's avatar
Sebastian Wolf committed
868
    }
Sebastian Wolf's avatar
Sebastian Wolf committed
869
870
871
	
	/*- - - - - - - - - - - - - - - - - - - - - - - - - - Basic arithmetics - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
	
872
	/** 
Sebastian Wolf's avatar
Sebastian Wolf committed
873
874
875
	 * @brief Calculates the entrywise sum of @a _lhs and @a _rhs.
	 * @details To be well-defined it is required that the dimensions of  @a _lhs and @a _rhs coincide.
	 * @param _lhs the first summand.
876
	 * @param _rhs the second summand.
Sebastian Wolf's avatar
Sebastian Wolf committed
877
878
	 * @return the sum.
	 */
879
	Tensor operator+(Tensor _lhs, const Tensor& _rhs);
Sebastian Wolf's avatar
Sebastian Wolf committed
880
881
882
883
884
885
886
887
	
	/** 
	 * @brief Calculates the entrywise difference between @a _lhs and @a _rhs.
	 * @details To be well-defined it is required that the dimensions of  @a _lhs and @a _rhs coincide.
	 * @param _lhs the minuend.
	 * @param _rhs the subtrahend.
	 * @return the difference.
	 */
888
	Tensor operator-(Tensor _lhs, const Tensor& _rhs);
Sebastian Wolf's avatar
Sebastian Wolf committed
889
890
891
	
	/** 
	 * @brief Calculates the entrywise multiplication of the Tensor @a _tensor with the constant @a _factor.
892
	 * @details Internally this only results in a change in the global factor.
Sebastian Wolf's avatar
Sebastian Wolf committed
893
894
	 * @param _factor the factor to be used.
	 * @param _tensor the Tensor that shall be scaled.
Sebastian Wolf's avatar
Sebastian Wolf committed
895
	 * @return the resulting scaled Tensor.
896
	 */
897
	Tensor operator*(const value_t _factor, Tensor _tensor);
Sebastian Wolf's avatar
Sebastian Wolf committed
898
899
900
901
902
903
904
905
	
	/** 
	 * @brief Calculates the entrywise multiplication of the Tensor @a _tensor with the constant @a _factor.
	 * @details Internally this only results in a change in the global factor.
	 * @param _tensor the Tensor that shall be scaled.
	 * @param _factor the factor to be used.
	 * @return the resulting scaled Tensor.
	 */
906
	Tensor operator*(Tensor _tensor, const value_t _factor);
Sebastian Wolf's avatar
Sebastian Wolf committed
907
908
909
910
911
912
913
914
	
	/** 
	 * @brief Calculates the entrywise divison of the Tensor @a _tensor with the constant @a _divisor.
	 * @details Internally this only results in a change in the global factor.
	 * @param _tensor the Tensor that shall be scaled.
	 * @param _divisor the factor to be used.
	 * @return the resulting scaled Tensor.
	 */
915
	Tensor operator/(Tensor _tensor, const value_t _divisor);
916
	
917
	
918
	
919
920
921
922
923
	/** 
	* @brief Calculates the frobenius norm of the given tensor
	* @param _tensor the Tensor of which the frobenious norm shall be calculated.
	* @return the frobenius norm .
	*/
924
	static XERUS_force_inline value_t frob_norm(const Tensor& _tensor) { return _tensor.frob_norm(); }
925
	
926
	/** 
927
	 * @brief Low-Level SVD calculation of a given Tensor @a _input = @a _U @a _S @a _Vt.
928
929
930
	 * @param _U Output Tensor for the resulting U.
	 * @param _S Output Tensor for the resulting S.
	 * @param _Vt Output Tensor for the resulting Vt.
931
	 * @param _input input Tensor of which the SVD shall be calculated.
932
933
	 * @param _splitPos index position at defining the matrification for which the SVD is calculated.
	 */
934
	void calculate_svd(Tensor& _U, Tensor& _S, Tensor& _Vt, Tensor _input, const size_t _splitPos, const size_t _maxRank, const value_t _eps);
935
936
937
938
939
940
941
942
	
	/** 
	 * @brief Low-Level QR calculation of a given Tensor @a _input = @a _Q @a _R.
	 * @param _Q Output Tensor for the resulting Q.
	 * @param _R Output Tensor for the resulting R.
	 * @param _input input Tensor of which the QR shall be calculated.
	 * @param _splitPos index position at defining the matrification for which the QR is calculated.
	 */
943
	void calculate_qr(Tensor& _Q, Tensor& _R, Tensor _input, const size_t _splitPos);
944
945
946
947
948
949
950
951
	
	/** 
	 * @brief Low-Level RQ calculation of a given Tensor @a _input = @a _R @a _Q.
	 * @param _R Output Tensor for the resulting R.
	 * @param _Q Output Tensor for the resulting Q.
	 * @param _input input Tensor of which the RQ shall be calculated.
	 * @param _splitPos index position at defining the matrification for which the RQ is calculated.
	 */
952
	void calculate_rq(Tensor& _R, Tensor& _Q, Tensor _input, const size_t _splitPos);
953
954
955
956
957
958
959
960
961
962
	
	/** 
	 * @brief Low-Level QC calculation of a given Tensor @a _input = @a _Q @a _C.
	 * @details This is a rank revealing QR decomposition with coloum pivoting. In contrast to an QR
	 * the C is not nessecarily upper triangular.
	 * @param _Q Output Tensor for the resulting Q.
	 * @param _C Output Tensor for the resulting R.
	 * @param _input input Tensor of which the QC shall be calculated.
	 * @param _splitPos index position at defining the matrification for which the QC is calculated.
	 */
963
	void calculate_qc(Tensor& _Q, Tensor& _C, Tensor _input, const size_t _splitPos);
964
965
966
967
968
969
970
971
972
973
	
	/** 
	 * @brief Low-Level CQ calculation of a given Tensor @a _input = @a _C @a _Q.
	 * @details This is a rank revealing RQ decomposition with coloum pivoting. In contrast to an RQ
	 * the C is not nessecarily upper triangular.
	 * @param _C Output Tensor for the resulting R.
	 * @param _Q Output Tensor for the resulting Q.
	 * @param _input input Tensor of which the CQ shall be calculated.
	 * @param _splitPos index position at defining the matrification for which the CQ is calculated.
	 */
974
	void calculate_cq(Tensor& _C, Tensor& _Q, Tensor _input, const size_t _splitPos);
975
	
976
977
978
979
980
981
982
	/** 
	 * @brief Low-Level calculation of the pseudo inverse of a given Tensor.
	 * @details Currently doen by calculation of the SVD.
	 * @param _inverse Output the pseudo inverse.
	 * @param _input input Tensor of which the CQ shall be calculated.
	 * @param _splitPos index position at defining the matrification for which the pseudo inverse is calculated.
	 */
983
984
	void pseudo_inverse(Tensor& _inverse, const Tensor& _input, const size_t _splitPos);
	Tensor pseudo_inverse(const Tensor& _input, const size_t _splitPos);
985
	
986
	/** 
987
988
	 * @brief Solves the least squares problem ||@a _A @a _X - @a _B||_F.
	 * @param _X Output Tensor for the resulting X.
989
	 * @param _A input Tensor A.
990
	 * @param _B input Tensor b.
991
	 * @param _extraDegree number of modes that @a _X and @a _B share and for which the least squares problem is independently solved.
992
	 */
993
	void solve_least_squares(Tensor& _X, const Tensor& _A, const Tensor& _B, const size_t _extraDegree = 0);
994
	
995
996
997
998
999
1000
	/**
	 * @brief Solves the equation Ax = b for x. If the solution is not unique, the output need not be the minimal norm solution.
	 * @param _X Output Tensor for the result
	 * @param _A input Operator A
	 * @param _B input right-hand-side b
	 * @param _extraDegree number of modes that @a _x and @a _B sharefor which the solution should be computed independently.
For faster browsing, not all history is shown. View entire blame