// Xerus - A General Purpose Tensor Library
// Copyright (C) 2014-2016 Benjamin Huber and Sebastian Wolf.
// 
// Xerus is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
// 
// Xerus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// 
// You should have received a copy of the GNU Affero General Public License
// along with Xerus. If not, see <http://www.gnu.org/licenses/>.
//
// For further information on Xerus visit https://libXerus.org 
// or contact us at contact@libXerus.org.

/**
 * @file
 * @brief Implementation of the Tensor class.
 */

#include <xerus/tensor.h>
#include <xerus/misc/check.h>

#include <xerus/misc/containerSupport.h>
#include <xerus/misc/basicArraySupport.h>
#include <xerus/misc/math.h>
#include <xerus/misc/internal.h>

#include <xerus/blasLapackWrapper.h>
#include <xerus/cholmod_wrapper.h>
#include <xerus/sparseTimesFullContraction.h>
#include <xerus/index.h>
#include <xerus/tensorNetwork.h>

#include <fstream>
#include <iomanip>

namespace xerus {
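	// Heuristic threshold for switching representations: converting a sparse
	// tensor to the dense representation is considered worthwhile once roughly
	// one in sparsityFactor entries is non-zero (cf. the calls to
	// use_dense_representation_if_desirable() below).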
	size_t Tensor::sparsityFactor = 4;
	
	/*- - - - - - - - - - - - - - - - - - - - - - - - - - Constructors - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
	
	Tensor::Tensor(const Representation _representation) : Tensor(DimensionTuple({}), _representation) { }
	
	
	Tensor::Tensor(DimensionTuple _dimensions, const Representation _representation, const Initialisation _init)
		: dimensions(std::move(_dimensions)), size(misc::product(dimensions)), representation(_representation)
	{
		REQUIRE(size != 0, "May not create tensors with a dimension == 0.");
		
		if(representation == Representation::Dense) {
			denseData.reset(new value_t[size], internal::array_deleter_vt);
			if(_init == Initialisation::Zero) {
				misc::set_zero(denseData.get(), size);
			}
		} else {
			sparseData.reset(new std::map<size_t, value_t>());
		}
	}
	
	
	Tensor::Tensor(DimensionTuple _dimensions, std::unique_ptr<value_t[]>&& _data)
	: dimensions(std::move(_dimensions)), size(misc::product(dimensions)), representation(Representation::Dense), denseData(std::move(_data)) {
		REQUIRE(size != 0, "May not create tensors with a dimension == 0.");
	}
	
	
	Tensor::Tensor(DimensionTuple _dimensions, const std::function<value_t()>& _f) : Tensor(std::move(_dimensions), Representation::Dense, Initialisation::None) {
		value_t* const realData = denseData.get();
		for (size_t i=0; i < size; ++i) {
			realData[i] = _f();
		}
	}
	
	
	Tensor::Tensor(DimensionTuple _dimensions, const std::function<value_t(const size_t)>& _f) : Tensor(std::move(_dimensions), Representation::Dense, Initialisation::None) {
		value_t* const realData = denseData.get();
		for (size_t i=0; i < size; ++i) {
			realData[i] = _f(i);
		}
	}
	
	
	Tensor::Tensor(DimensionTuple _dimensions, const std::function<value_t(const MultiIndex&)>& _f) : Tensor(std::move(_dimensions), Representation::Dense, Initialisation::None) {
		value_t* const realData = denseData.get();
		MultiIndex multIdx(degree(), 0);
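		// Fill the entries in row-major order: the last index runs fastest, an
		// index that reaches its dimension is reset to zero and the next-slower
		// index is advanced. Because changingIndex is unsigned, the check
		// changingIndex >= degree() below detects the wrap-around past index 0
		// and terminates the loop after the last entry.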
		size_t idx = 0;
		while (true) {
			realData[idx] = _f(multIdx);
			// Increase indices
			idx++;
			size_t changingIndex = degree()-1;
			multIdx[changingIndex]++;
			while(multIdx[changingIndex] == dimensions[changingIndex]) {
				multIdx[changingIndex] = 0;
				changingIndex--;
				// Return on overflow 
				if(changingIndex >= degree()) { return; }
				multIdx[changingIndex]++;
			}
		}
	}
	
	
	Tensor::Tensor(DimensionTuple _dimensions, const size_t _N, const std::function<std::pair<size_t, value_t>(size_t, size_t)>& _f) : Tensor(std::move(_dimensions), Representation::Sparse, Initialisation::Zero) {
		REQUIRE(_N <= size, "Cannot create more non-zero entries than the size of the Tensor.");
		for (size_t i = 0; i < _N; ++i) {
			std::pair<size_t, value_t> entry = _f(i, size);
			REQUIRE(entry.first < size, "Position is out of bounds " << entry.first);
			REQUIRE(!misc::contains(*sparseData, entry.first), "Already contained " << entry.first);
			sparseData->insert(std::move(entry));
		}
	}
	
	
	Tensor Tensor::ones(DimensionTuple _dimensions) {
		Tensor ret(std::move(_dimensions), Representation::Dense, Initialisation::None);
		value_t* const data = ret.get_dense_data();
		for(size_t i = 0; i < ret.size; ++i) {
			data[i] = 1.0;
		}
		return ret;
	}
	
	
	Tensor Tensor::identity(DimensionTuple _dimensions) {
		REQUIRE(_dimensions.size()%2 == 0, "Identity tensor must have even degree, here: " << _dimensions.size());
		const size_t d = _dimensions.size();
		
		Tensor ret(std::move(_dimensions), Representation::Sparse);
		MultiIndex position(d, 0);
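		// Set every entry whose first d/2 indices coincide with its last d/2
		// indices to one; per index pair the run is capped by the smaller of the
		// two paired dimensions, so non-square "identities" are supported.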
		
		if(d == 0) {
			ret[position] = 1.0;
		} else {
			bool notMultiLevelBreak = true;
			while(notMultiLevelBreak) {
				ret[position] = 1.0;
				
				position[0]++; position[d/2]++;
				size_t node = 0;
				while(position[node] == std::min(ret.dimensions[node], ret.dimensions[d/2+node])) {
					position[node] = position[d/2+node] = 0;
					node++;
					if(node == d/2) {notMultiLevelBreak = false; break;}
					position[node]++; position[d/2+node]++;
				}
			}
		}
		
		return ret;
	}
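	// (Example: Tensor::identity({2, 3, 2, 3}) is one exactly at the positions
	// {i, j, i, j} with i < 2 and j < 3, and zero everywhere else.)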
	
	
	Tensor Tensor::kronecker(DimensionTuple _dimensions) {
		Tensor ret(std::move(_dimensions), Representation::Sparse);
		if(ret.degree() == 0) {
			ret[{}] = 1.0;
		} else {
			for(size_t i = 0; i < misc::min(ret.dimensions); ++i) {
				ret[MultiIndex(ret.degree(), i)] = 1.0;
			}
		}
		return ret;
	}
	
	
	Tensor Tensor::dirac(DimensionTuple _dimensions, const MultiIndex& _position) {
		Tensor ret(std::move(_dimensions), Representation::Sparse);
		ret[_position] = 1.0;
		return ret;
	}
	
	
	Tensor Tensor::dirac(DimensionTuple _dimensions, const size_t _position) {
		Tensor ret(std::move(_dimensions), Representation::Sparse);
		ret[_position] = 1.0;
		return ret;
	}
	
	
	Tensor Tensor::dense_copy() const {
		Tensor ret(*this);
		ret.use_dense_representation();
		return ret;
	}
	
	
	Tensor Tensor::sparse_copy() const {
		Tensor ret(*this);
		ret.use_sparse_representation();
		return ret;
	}
	
	
	Tensor& Tensor::operator=(const TensorNetwork& _network) {
		return operator=(Tensor(_network));
	}
	
	
	/*- - - - - - - - - - - - - - - - - - - - - - - - - - Information - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
	
	size_t Tensor::degree() const {
		return dimensions.size();
	}
	
	
	bool Tensor::has_factor() const {
// 		return std::abs(1.0-factor) > std::numeric_limits<double>::epsilon();
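		// The exact floating point comparison is intentional here: factor is
		// only ever reset to the literal 1.0, so any other bit pattern means a
		// factor really is present. The pragma merely silences -Wfloat-equal.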
		#pragma GCC diagnostic push
			#pragma GCC diagnostic ignored "-Wfloat-equal"
			return (factor != 1.0);
		#pragma GCC diagnostic pop
	}
	
	
	bool Tensor::is_dense() const {
		INTERNAL_CHECK((representation == Representation::Dense && denseData && !sparseData) || (representation == Representation::Sparse && sparseData && !denseData), "Internal Error: " << bool(representation) << bool(denseData) << bool(sparseData));
		return representation == Representation::Dense;
	}
	
	
	bool Tensor::is_sparse() const {
		INTERNAL_CHECK((representation == Representation::Dense && denseData && !sparseData) || (representation == Representation::Sparse && sparseData && !denseData), "Internal Error: " << bool(representation) << bool(denseData) << bool(sparseData));
		return representation == Representation::Sparse;
	}
	
	
	size_t Tensor::sparsity() const {
		if(is_sparse()) {
			return sparseData->size();
		}
		return size;
	}
	
	
	size_t Tensor::count_non_zero_entries(const value_t _eps) const {
		if(is_dense()) {
			size_t count = 0;
			for(size_t i = 0; i < size; ++i) {
				if(std::abs(denseData.get()[i]) > _eps ) { count++; }
			}
			return count;
		}
		size_t count = 0;
		for(const auto& entry : *sparseData) {
			if(std::abs(entry.second) > _eps) { count++; }
		}
		return count;
	}
	
	
	bool Tensor::all_entries_valid() const {
		if(is_dense()) {
			for(size_t i = 0; i < size; ++i) {
				if(!std::isfinite(denseData.get()[i])) { return false; }
			}
		} else {
			for(const auto& entry : *sparseData) {
				if(!std::isfinite(entry.second)) { return false; }
			}
		}
		return true;
	}
	
	
	size_t Tensor::reorder_cost() const {
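		// Heuristic cost model: reordering a sparse entry is assumed to be about
		// ten times as expensive as moving a dense one.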
		return is_sparse() ? 10*sparsity() : size;
	}
	
	
	value_t Tensor::frob_norm() const {
		if(is_dense()) {
			return std::abs(factor)*blasWrapper::two_norm(denseData.get(), size);
		}
		value_t norm = 0;
		for(const auto& entry : *sparseData) {
			norm += misc::sqr(entry.second);
		}
		return std::abs(factor)*sqrt(norm);
	}
	
	
	/*- - - - - - - - - - - - - - - - - - - - - - - - - - Basic arithmetics - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
	
	Tensor& Tensor::operator+=(const Tensor& _other) {
		plus_minus_equal<1>(*this, _other);
		return *this;
	}
	
	
	Tensor& Tensor::operator-=(const Tensor& _other) {
		plus_minus_equal<-1>(*this, _other);
		return *this;
	}
	
	
	Tensor& Tensor::operator*=(const value_t _factor) {
		factor *= _factor;
		return *this;
	}
	
	
	Tensor& Tensor::operator/=(const value_t _divisor) {
		factor /= _divisor;
		return *this;
	}
	
	
	/*- - - - - - - - - - - - - - - - - - - - - - - - - - Access - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
	value_t& Tensor::operator[](const size_t _position) {
		REQUIRE(_position < size, "Position " << _position << " does not exist in Tensor of dimensions " << dimensions);
		
		ensure_own_data_and_apply_factor();
		
		if(is_dense()) {
			return denseData.get()[_position];
		}
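		// Sparse case: accessing a position via std::map::operator[] inserts a
		// zero-initialised entry. This can push the tensor over the sparsity
		// threshold; if the representation switched to dense, the reference into
		// the (now discarded) map must not be handed out.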
		value_t& result = (*sparseData)[_position];
		use_dense_representation_if_desirable();
		if (is_dense()) {
			return denseData.get()[_position];
		}
		return result;
	}
	
	
	value_t Tensor::operator[](const size_t _position) const {
		REQUIRE(_position < size, "Position " << _position << " does not exist in Tensor of dimensions " << dimensions);
		
		if(is_dense()) {
			return factor*denseData.get()[_position];
		}
		const std::map<size_t, value_t>::const_iterator entry = sparseData->find(_position);
		if(entry == sparseData->end()) {
			return 0.0;
		}
		return factor*entry->second;
	}
	
	
	value_t& Tensor::operator[](const MultiIndex& _positions) {
		return operator[](multiIndex_to_position(_positions, dimensions));
	}
	
	
	value_t Tensor::operator[](const MultiIndex& _positions) const {
		return operator[](multiIndex_to_position(_positions, dimensions));
	}
	
	
	value_t& Tensor::at(const size_t _position) {
		REQUIRE(_position < size, "Position " << _position << " does not exist in Tensor of dimensions " << dimensions);
		REQUIRE(!has_factor(), "at() must not be called if there is a factor.");
		REQUIRE((is_dense() && denseData.unique()) || (is_sparse() && sparseData.unique()) , "Data must be unique to call at().");
		
		if(is_dense()) {
			return denseData.get()[_position];
		}
		value_t& result = (*sparseData)[_position];
		use_dense_representation_if_desirable();
		if (is_dense()) {
			return denseData.get()[_position];
		}
		return result;
	}
	
	
	value_t Tensor::cat(const size_t _position) const {
		REQUIRE(_position < size, "Position " << _position << " does not exist in Tensor of dimensions " << dimensions);
		REQUIRE(!has_factor(), "cat() must not be called if there is a factor.");
		
		if(is_dense()) {
			return denseData.get()[_position];
		}
		const std::map<size_t, value_t>::const_iterator entry = sparseData->find(_position);
		if(entry == sparseData->end()) {
			return 0.0;
		}
		return entry->second;
	}
	
	
	value_t* Tensor::get_dense_data() {
		use_dense_representation();
		ensure_own_data_and_apply_factor();
		return denseData.get();
	}
	
	
	value_t* Tensor::get_unsanitized_dense_data() {
		REQUIRE(is_dense(), "Unsanitized dense data requested, but representation is not dense!");
		return denseData.get();
	}
	
	
	const value_t* Tensor::get_unsanitized_dense_data() const {
		REQUIRE(is_dense(), "Unsanitized dense data requested, but representation is not dense!");
		return denseData.get();
	}
	
	
	value_t* Tensor::override_dense_data()  {
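		// Provides writeable dense storage whose previous contents may be
		// discarded: the factor is dropped and the data is only reallocated if
		// it is currently shared (or not yet dense), since no caller may rely
		// on the old values.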
		factor = 1.0;
		if(!denseData.unique()) {
			sparseData.reset();
			denseData.reset(new value_t[size], internal::array_deleter_vt);
			representation = Representation::Dense;
		}
		return denseData.get();
	}
	
	
	const std::shared_ptr<value_t>& Tensor::get_internal_dense_data() {
		REQUIRE(is_dense(), "Internal dense data requested, but representation is not dense!");
		return denseData;
	}
	
	
	std::map<size_t, value_t>& Tensor::get_sparse_data() {
		CHECK(is_sparse(), warning, "Request for sparse data although the Tensor is not sparse.");
		use_sparse_representation();
		ensure_own_data_and_apply_factor();
		return *sparseData.get();
	}
	
	
	std::map<size_t, value_t>& Tensor::get_unsanitized_sparse_data() {
		REQUIRE(is_sparse(), "Unsanitized sparse data requested, but representation is not sparse!");
		return *sparseData.get();
	}
	
	
	const std::map<size_t, value_t>& Tensor::get_unsanitized_sparse_data() const {
		REQUIRE(is_sparse(), "Unsanitized sparse data requested, but representation is not sparse!");
		return *sparseData.get();
	}
	
	
	std::map<size_t, value_t>& Tensor::override_sparse_data() {
		factor = 1.0;
		if(sparseData.unique()) {
			INTERNAL_CHECK(is_sparse(), "Internal Error");
			sparseData->clear();
		} else {
			denseData.reset();
			sparseData.reset(new std::map<size_t, value_t>());
			representation = Representation::Sparse;
		}
		return *sparseData.get();
	}
	
	
	const std::shared_ptr<std::map<size_t, value_t>>& Tensor::get_internal_sparse_data() {
		REQUIRE(is_sparse(), "Internal sparse data requested, but representation is not sparse!");
		return sparseData;
	}
	
	
	/*- - - - - - - - - - - - - - - - - - - - - - - - - - Indexing - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
	internal::IndexedTensor<Tensor> Tensor::operator()(const std::vector<Index>&  _indices) {
		return internal::IndexedTensor<Tensor>(this, _indices, false);
	}
	
	
	internal::IndexedTensor<Tensor> Tensor::operator()(      std::vector<Index>&& _indices) {
		return internal::IndexedTensor<Tensor>(this, std::move(_indices), false);
	}
	
	
	internal::IndexedTensorReadOnly<Tensor> Tensor::operator()(const std::vector<Index>&  _indices) const {
		return internal::IndexedTensorReadOnly<Tensor>(this, _indices);
	}
	
	
	internal::IndexedTensorReadOnly<Tensor> Tensor::operator()(      std::vector<Index>&& _indices) const {
		return internal::IndexedTensorReadOnly<Tensor>(this, std::move(_indices));
	}
	
	
	/*- - - - - - - - - - - - - - - - - - - - - - - - - - Modifiers - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
	void Tensor::reset(DimensionTuple _newDim, const Representation _representation, const Initialisation _init) {
		const size_t oldDataSize = size;
		dimensions = std::move(_newDim);
		size = misc::product(dimensions);
		factor = 1.0;
		
		if(_representation == Representation::Dense) {
			if(representation == Representation::Dense) {
				if(oldDataSize != size || !denseData.unique()) {
					denseData.reset(new value_t[size], internal::array_deleter_vt);
				}
			} else {
				sparseData.reset();
				denseData.reset(new value_t[size], internal::array_deleter_vt);
				representation = _representation;
			}
			
			if(_init == Initialisation::Zero) {
				memset(denseData.get(), 0, size*sizeof(value_t));
			}
		} else {
			if(representation == Representation::Sparse) {
				if(sparseData.unique()) {
					sparseData->clear();
				} else {
					sparseData.reset(new std::map<size_t, value_t>());
				}
			} else {
				denseData.reset();
				sparseData.reset(new std::map<size_t, value_t>());
				representation = _representation;
			}
		}
	}
	
	
	void Tensor::reset(DimensionTuple _newDim, const Initialisation _init) {
		const size_t oldDataSize = size;
		dimensions = std::move(_newDim);
		size = misc::product(dimensions);
		factor = 1.0;
		
		if(representation == Representation::Dense) {
			if(oldDataSize != size || !denseData.unique()) {
				denseData.reset(new value_t[size], internal::array_deleter_vt);
			}
			
			if(_init == Initialisation::Zero) {
				memset(denseData.get(), 0, size*sizeof(value_t));
			}
		} else {
			if(sparseData.unique()) {
				sparseData->clear();
			} else {
				sparseData.reset(new std::map<size_t, value_t>());
			}
		}
	}
	
	
	void Tensor::reset() {
		dimensions.clear();
		factor = 1.0;
		
		if(representation == Representation::Dense) {
			if(size != 1 || !denseData.unique()) {
				denseData.reset(new value_t[1], internal::array_deleter_vt);
			}
			*denseData.get() = 0.0;
		} else {
			if(sparseData.unique()) {
				sparseData->clear();
			} else {
				sparseData.reset(new std::map<size_t, value_t>());
			}
		}
		size = 1;
	}
	
	
	void Tensor::reset(DimensionTuple _newDim, const std::shared_ptr<value_t>& _newData) {
		dimensions = std::move(_newDim);
		size = misc::product(dimensions);
		factor = 1.0;
		
		if(representation == Representation::Sparse) {
			sparseData.reset();
			representation = Representation::Dense;
		}
		
		denseData = _newData;
	}
	
	
	void Tensor::reset(DimensionTuple _newDim, std::unique_ptr<value_t[]>&& _newData) {
		dimensions = std::move(_newDim);
		size = misc::product(dimensions);
		factor = 1.0;
		
		if(representation == Representation::Sparse) {
			sparseData.reset();
			representation = Representation::Dense;
		}
		
		denseData = std::move(_newData);
	}
	
	
	void Tensor::reset(DimensionTuple _newDim, std::map<size_t, value_t>&& _newData) {
		dimensions = std::move(_newDim);
		size = misc::product(dimensions);
		factor = 1.0;
		
		if(representation == Representation::Dense) {
			denseData.reset();
			representation = Representation::Sparse;
		}
		
		std::shared_ptr<std::map<size_t, value_t>> newD(new std::map<size_t, value_t>(std::move(_newData)));
		sparseData = std::move(newD);
	}
	
	
	void Tensor::reinterpret_dimensions(DimensionTuple _newDimensions) {
		REQUIRE(misc::product(_newDimensions) == size, "New dimensions must not change the size of the tensor in reinterpretation: " << misc::product(_newDimensions) << " != " << size);
		dimensions = std::move(_newDimensions); // NOTE pass-by-value
	}
	
	
	void Tensor::resize_mode(const size_t _mode, const size_t _newDim, size_t _cutPos) {
		REQUIRE(_mode < degree(), "Can't resize mode " << _mode << " as the tensor is only of order " << degree());

		if (dimensions[_mode] == _newDim) { return; }  // Trivial case: Nothing to do

		const size_t oldDim = dimensions[_mode];
		_cutPos = std::min(_cutPos, oldDim);
		
		REQUIRE(_newDim > 0, "Dimension must be larger than 0! Is " << _newDim);
		REQUIRE(_newDim > oldDim || _cutPos >= oldDim -_newDim, "Cannot remove " << oldDim -_newDim << " slates starting (exclusively) at position " << _cutPos);
		
		const size_t dimStepSize = misc::product(dimensions, _mode+1, degree());
		const size_t oldStepSize = oldDim*dimStepSize;
		const size_t newStepSize = _newDim*dimStepSize;
		const size_t blockCount = size / oldStepSize; //  == misc::product(dimensions, 0, _n);
		const size_t newsize = blockCount*newStepSize;
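		// The tensor is treated as blockCount contiguous blocks of oldStepSize
		// entries each; within a block, slate j of mode _mode occupies the
		// dimStepSize entries starting at offset j*dimStepSize.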
		
		if(is_dense()) {
			std::unique_ptr<value_t[]> tmpData(new value_t[newsize]);
			
			if (_newDim > oldDim) { // Add new slates
				const size_t insertBlockSize = (_newDim-oldDim)*dimStepSize;
				if(_cutPos == 0) {
					for ( size_t i = 0; i < blockCount; ++i ) {
						value_t* const currData = tmpData.get()+i*newStepSize;
						misc::set_zero(currData, insertBlockSize);
						misc::copy(currData+insertBlockSize, denseData.get()+i*oldStepSize, oldStepSize);
					}
				} else if(_cutPos == oldDim) {
					for ( size_t i = 0; i < blockCount; ++i ) {
						value_t* const currData = tmpData.get()+i*newStepSize;
						misc::copy(currData, denseData.get()+i*oldStepSize, oldStepSize);
						misc::set_zero(currData+oldStepSize, insertBlockSize);
					}
				} else {
					const size_t preBlockSize = _cutPos*dimStepSize;
					const size_t postBlockSize = (oldDim-_cutPos)*dimStepSize;
					for (size_t i = 0; i < blockCount; ++i) {
						value_t* const currData = tmpData.get()+i*newStepSize;
						misc::copy(currData, denseData.get()+i*oldStepSize, preBlockSize);
						misc::set_zero(currData+preBlockSize, insertBlockSize);
						misc::copy(currData+preBlockSize+insertBlockSize, denseData.get()+i*oldStepSize+preBlockSize, postBlockSize);
					}
				}
			} else { // Remove slates
				if (_cutPos < oldDim) {
					const size_t removedSlates = (oldDim-_newDim);
					const size_t removedBlockSize = removedSlates*dimStepSize;
					const size_t preBlockSize = (_cutPos-removedSlates)*dimStepSize;
					const size_t postBlockSize = (oldDim-_cutPos)*dimStepSize;
					
					INTERNAL_CHECK(removedBlockSize+preBlockSize+postBlockSize == oldStepSize && preBlockSize+postBlockSize == newStepSize, "IE");
					
					for (size_t i = 0; i < blockCount; ++i) {
						value_t* const currData = tmpData.get()+i*newStepSize;
						misc::copy(currData, denseData.get()+i*oldStepSize, preBlockSize);
						misc::copy(currData+preBlockSize, denseData.get()+i*oldStepSize+preBlockSize+removedBlockSize, postBlockSize);
					}
				} else {
					for (size_t i = 0; i < blockCount; ++i) {
						misc::copy(tmpData.get()+i*newStepSize, denseData.get()+i*oldStepSize, newStepSize);
					}
				}
			}
			denseData.reset(tmpData.release(), internal::array_deleter_vt);
		
		} else {
			std::unique_ptr<std::map<size_t, value_t>> tmpData(new std::map<size_t, value_t>());
			
			if (_newDim > oldDim) { // Add new slates
				const size_t slatesAdded = _newDim-oldDim;
				for(const auto& entry : *sparseData.get()) {
					// Decode the position as i*oldStepSize + j*dimStepSize + k
					const size_t k = entry.first%dimStepSize;
					const size_t j = (entry.first%oldStepSize)/dimStepSize;
					const size_t i = entry.first/oldStepSize;
					
					if(j < _cutPos) { // Entry remains at current position j
						tmpData->emplace(i*newStepSize+j*dimStepSize+k, entry.second);
					} else { // Entry moves to position j+slatesAdded
						tmpData->emplace(i*newStepSize+(j+slatesAdded)*dimStepSize+k, entry.second);
					}
				}
			} else { // Remove slates
				const size_t slatesRemoved = oldDim-_newDim;
				for(const auto& entry : *sparseData.get()) {
					// Decode the position as i*oldStepSize + j*dimStepSize + k
					const size_t k = entry.first%dimStepSize;
					const size_t j = (entry.first%oldStepSize)/dimStepSize;
					const size_t i = entry.first/oldStepSize;
					
					if(j < _cutPos-slatesRemoved) { // Entry remains at current position j
						tmpData->emplace(i*newStepSize+j*dimStepSize+k, entry.second);
					} else if(j >= _cutPos) { // Entry moves down to position j-slatesRemoved
						tmpData->emplace(i*newStepSize+(j-slatesRemoved)*dimStepSize+k, entry.second);
					}
				}
			}
			sparseData.reset(tmpData.release());
		}
		
		dimensions[_mode] = _newDim;
		size = newsize;
	}
	
	
	void Tensor::fix_mode(const size_t _mode, const size_t _slatePosition) {
		REQUIRE(_slatePosition < dimensions[_mode], "The given slatePosition must be smaller than the corresponding dimension. Here " << _slatePosition << " >= " << dimensions[_mode] << ", dim = " << dimensions << "=" << size << " mode " << _mode);
		
		const size_t stepCount = misc::product(dimensions, 0, _mode);
		const size_t blockSize = misc::product(dimensions, _mode+1, degree());
		const size_t stepSize = dimensions[_mode]*blockSize;
		const size_t slateOffset = _slatePosition*blockSize;
		
		if(is_dense()) {
			std::unique_ptr<value_t[]> tmpData(new value_t[stepCount*blockSize]);
			
			// Copy data
			for(size_t i = 0; i < stepCount; ++i) {
				misc::copy(tmpData.get()+i*blockSize, denseData.get()+i*stepSize+slateOffset, blockSize);
			}
			
			denseData.reset(tmpData.release(), &internal::array_deleter_vt);
		} else {
			std::unique_ptr<std::map<size_t, value_t>> tmpData(new std::map<size_t, value_t>());
			
			for(const auto& entry : *sparseData.get()) {
				// Decode the position as i*stepSize + j*blockSize + k
				const size_t k = entry.first%blockSize;
				const size_t j = (entry.first%stepSize)/blockSize;
				const size_t i = entry.first/stepSize;
				
				if(j == _slatePosition) {
					tmpData->emplace(i*blockSize+k, entry.second);
				}
			}
			
			sparseData.reset(tmpData.release());
		}
		
		// Adjust dimensions
		dimensions.erase(dimensions.begin()+long(_mode));
		size = stepCount*blockSize;
	}
	
	
	void Tensor::remove_slate(const size_t _mode, const size_t _pos) {
		REQUIRE(_mode < degree(), "Mode " << _mode << " does not exist in a tensor of order " << degree());
		REQUIRE(_pos < dimensions[_mode], "Slate position " << _pos << " exceeds the dimension " << dimensions[_mode] << " of mode " << _mode);
		REQUIRE(dimensions[_mode] > 1, "Cannot remove the only slate of a mode.");
		
		resize_mode(_mode, dimensions[_mode]-1, _pos+1);
	}
	
	
	void Tensor::perform_trace(size_t _firstIndex, size_t _secondIndex) {
		REQUIRE(_firstIndex != _secondIndex, "Given indices must not coincide.");
		REQUIRE(dimensions[_firstIndex] == dimensions[_secondIndex], "Dimensions of trace indices must coincide.");
		
		if(_firstIndex > _secondIndex) { std::swap(_firstIndex, _secondIndex); }
		
		const size_t front = misc::product(dimensions, 0, _firstIndex);
		const size_t mid = misc::product(dimensions, _firstIndex+1, _secondIndex);
		const size_t back = misc::product(dimensions, _secondIndex+1, degree());
		const size_t traceDim = dimensions[_firstIndex];
		const size_t frontStepSize = traceDim*mid*traceDim*back;
		const size_t traceStepSize = mid*traceDim*back+back;
		const size_t midStepSize = traceDim*back;
		
		size = front*mid*back;
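		// A flat position decomposes as
		//   ((((f*traceDim + t1)*mid + m)*traceDim + t2)*back + b);
		// the trace sums all entries with t1 == t2 into (f*mid + m)*back + b,
		// which is exactly what both branches below implement.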
		
		if(is_dense()) {
			std::unique_ptr<value_t[]> newData(new value_t[size]);
			misc::set_zero(newData.get(), size);
			
			for(size_t f = 0; f < front; ++f) {
				for(size_t t = 0; t < traceDim; ++t) {
					for(size_t m = 0; m < mid; ++m) {
						misc::add_scaled(newData.get()+(f*mid+m)*back, factor, denseData.get()+f*frontStepSize+t*traceStepSize+m*midStepSize, back);
					}
				}
			}
			
			denseData.reset(newData.release(), internal::array_deleter_vt);
		} else {
			std::unique_ptr<std::map<size_t, value_t>> newData( new std::map<size_t, value_t>());
			
			for(const auto& entry : *sparseData) {
				size_t pos = entry.first;
				const size_t backIdx = pos%back;
				pos /= back;
				const size_t traceBackIdx = pos%traceDim;
				pos /= traceDim;
				const size_t midIdx = pos%mid;
				pos /= mid;
				const size_t traceFrontIdx = pos%traceDim;
				pos /= traceDim;
				const size_t frontIdx = pos;
				
				if(traceFrontIdx == traceBackIdx) {
					(*newData)[(frontIdx*mid + midIdx)*back + backIdx] += factor*entry.second;
				}
			}
			
			sparseData.reset(newData.release());
		}
		
		dimensions.erase(dimensions.begin()+_secondIndex);
		dimensions.erase(dimensions.begin()+_firstIndex);
		factor = 1.0;
	}
	
	
	void Tensor::modify_diagonal_entries(const std::function<void(value_t&)>& _f) {
		ensure_own_data_and_apply_factor();
		
		if(degree() == 0) {
			_f(at(0)); 
		} else {
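			// stepSize accumulates to the flat offset of the entry (1, 1, ..., 1),
			// i.e. the stride between two consecutive diagonal elements.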
			size_t stepSize = 1;
			for(size_t i = 1; i < degree(); ++i) {
				stepSize *= dimensions[i];
				stepSize += 1;
			}
			
			const size_t numDiags = misc::min(dimensions);
			
			for(size_t i = 0; i < numDiags; ++i){
				_f(at(i*stepSize));
			}
		}
	}
	
	
	void Tensor::modify_diagonal_entries(const std::function<void(value_t&, const size_t)>& _f) {
		ensure_own_data_and_apply_factor();
		
		if(degree() == 0) {
			_f(at(0), 0);
		} else {
			size_t stepSize = 1;
			for(size_t i = 1; i < degree(); ++i) {
				stepSize *= dimensions[i];
				stepSize += 1;
			}
			
			const size_t numDiags = misc::min(dimensions);
			
			for(size_t i = 0; i < numDiags; ++i){
				_f(at(i*stepSize), i);
			}
		}
	}
	
	
	void Tensor::modify_entries(const std::function<void(value_t&)>& _f) {
		ensure_own_data_and_apply_factor();
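		// Note: the sparse branch below deliberately visits every position, not
		// only the stored ones, because _f may map zero entries to non-zero
		// values (new entries are inserted) and vice versa (entries are erased).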
		if(is_dense()) {
			for(size_t i = 0; i < size; ++i) { _f(at(i)); }
		} else {
			for(size_t i = 0; i < size; ++i) {
				value_t val = cat(i);
				const value_t oldVal = val;
				_f(val);
				if(misc::hard_not_equal(val, 0.0)) {
					at(i) = val;
				} else if( misc::hard_not_equal(oldVal, 0.0)) {
					IF_CHECK( size_t succ =) sparseData->erase(i);
					INTERNAL_CHECK(succ == 1, "Internal Error");
				}
			}
		}
	}
	
	
	void Tensor::modify_entries(const std::function<void(value_t&, const size_t)>& _f) {
		ensure_own_data_and_apply_factor();
		if(is_dense()) {
			for(size_t i = 0; i < size; ++i) { _f(at(i), i); }
		} else {
			for(size_t i = 0; i < size; ++i) {
				value_t val = cat(i);
				const value_t oldVal = val;
				_f(val, i);
				if(misc::hard_not_equal(val, 0.0)) {
					at(i) = val;
				} else if( misc::hard_not_equal(oldVal, 0.0)) {
					IF_CHECK( size_t succ =) sparseData->erase(i);
					INTERNAL_CHECK(succ == 1, "Internal Error");
				}
			}
		}
	}
	
	
	void Tensor::modify_entries(const std::function<void(value_t&, const MultiIndex&)>& _f) {
		ensure_own_data_and_apply_factor();
		
		MultiIndex multIdx(degree(), 0);
		size_t idx = 0;
		while (true) {
			if(is_dense()) {
				_f(at(idx), multIdx);
			} else {
				value_t val = cat(idx);
				const value_t oldVal = val;
				_f(val, multIdx);
				if(misc::hard_not_equal(val, 0.0)) {
					at(idx) = val;
				} else if( misc::hard_not_equal(oldVal, 0.0)) {
					IF_CHECK( size_t succ =) sparseData->erase(idx);
					INTERNAL_CHECK(succ == 1, "Internal Error");
				}
			}
			
			// Increase indices
			idx++;
			size_t changingIndex = degree()-1;
			multIdx[changingIndex]++;
			while(multIdx[changingIndex] == dimensions[changingIndex]) {
				multIdx[changingIndex] = 0;
				changingIndex--;
				// Return on overflow 
				if(changingIndex >= degree()) { return; }
				multIdx[changingIndex]++;
			}
		}
	}
	
	
	inline std::vector<size_t> get_step_sizes(const Tensor::DimensionTuple& _dimensions) {
		std::vector<size_t> stepSizes(_dimensions.size());
		if(!_dimensions.empty()) {
			stepSizes.back() = 1;
			for(size_t i = stepSizes.size(); i > 1; --i) {
				stepSizes[i-2] = stepSizes[i-1]*_dimensions[i-1];
			}
		}
		return stepSizes;
	}
	
	void Tensor::offset_add(const Tensor& _other, const std::vector<size_t>& _offsets) {
		IF_CHECK(
			REQUIRE(degree() == _other.degree(), "Degrees of the this and the given tensor must coincide. Here " << degree() << " vs " << _other.degree());
			REQUIRE(degree() == _offsets.size(), "Degrees of the this tensor and number of given offsets must coincide. Here " << degree() << " vs " << _offsets.size());
			for(size_t d = 0; d < degree(); ++d) {
				REQUIRE(dimensions[d] >= _offsets[d]+_other.dimensions[d], "Invalid offset/tensor dimension for dimension " << d << " because this dimension is " << dimensions[d] << " but other tensor dimension + offset is " << _offsets[d]+_other.dimensions[d]);
			}
		)
		
		if(_other.is_dense()) {
			use_dense_representation();
			
			const std::vector<size_t> stepSizes = get_step_sizes(dimensions);
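			// stepSizes[d] is the row-major stride of mode d (the product of all
			// dimensions behind it), so the flat offset of the target block is
			// the dot product of _offsets with these strides.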
		
			// Calculate the actual offset
			size_t offset = 0;
			for(size_t d = 0; d < degree(); ++d) {
				offset += _offsets[d]*stepSizes[d];