Commit e9843bd7 authored by Philipp Trunschke

try git filter renormalization

parent ef66310c
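The message refers to Git's end-of-line renormalization: after declaring text files in a `.gitattributes` (for example `* text=auto`), running `git add --renormalize .` re-applies the configured filters to every tracked file, so whitespace-only rewrites of existing files are the expected effect of such a commit. The exact attribute settings used here are not shown in this diff.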
......@@ -60,7 +60,7 @@ TTOperator create_operator(const size_t _order) {
        Sstar(j, k)*Tensor::dirac({1, 3}, 0)(i, l)
        + L(j, k)*Tensor::dirac({1, 3}, 1)(i, l)
        + I(j, k)*Tensor::dirac({1, 3}, 2)(i, l);
    A.set_component(0, comp);
    // Create middle components
......@@ -80,7 +80,7 @@ TTOperator create_operator(const size_t _order) {
        I(j, k)*Tensor::dirac({3, 1}, 0)(i, l)
        + M(j, k)*Tensor::dirac({3, 1}, 1)(i, l)
        + S(j, k)*Tensor::dirac({3, 1}, 2)(i, l);
    A.set_component(_order-1, comp);
    return A;
......@@ -105,10 +105,10 @@ std::vector<TTTensor> implicit_euler(const TTOperator& _A, TTTensor _x,
    std::vector<TTTensor> results;
    TTTensor nextX = _x;
    results.push_back(_x);
    for(size_t i = 0; i < _n; ++i) {
        ourALS(op, nextX, _x);
        // Normalize
        double norm = one_norm(nextX);
        nextX /= norm;
......
......@@ -2,10 +2,10 @@
#include <boost/version.hpp>
int main() {
std::cout << "Boost "
<< BOOST_VERSION / 100000 << "." // major version
<< BOOST_VERSION / 100 % 1000 << "." // minor version
<< BOOST_VERSION % 100 // patch level
<< std::endl;
return 0;
std::cout << "Boost "
<< BOOST_VERSION / 100000 << "." // major version
<< BOOST_VERSION / 100 % 1000 << "." // minor version
<< BOOST_VERSION % 100 // patch level
<< std::endl;
return 0;
}
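For example, with `BOOST_VERSION == 106501` this prints `Boost 1.65.1`: 106501/100000 = 1, (106501/100) % 1000 = 65, and 106501 % 100 = 1.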
......@@ -6,13 +6,13 @@ import numpy as np
class TestArithmetic(unittest.TestCase):
    def setUp(self):
        self.dimension = [10, 15, 20]
        self.ranks = [1, 8, 4, 1]
    def test_tensor_linear_combination(self):
        ten_a = xe.Tensor.ones(self.dimension)
        ten_b = xe.Tensor.ones(self.dimension) * 4
        ten_c = ten_a + ten_b
        for (i, j, k) in ranges(*self.dimension):
            self.assertAlmostEqual(ten_c[i, j, k], 5)
......@@ -4,38 +4,38 @@ import xerus as xe
def generate_random_tttensors(num_tests, max_order=4, max_dimension=30, max_rank=30, random=None):
    orders = random.randint(low=1, high=max_order+1, size=num_tests)
    dimensions = [random.randint(low=1, high=max_dimension, size=order) for order in orders]
    ranks = [random.randint(low=1, high=max_rank, size=(order-1)) for order in orders]
    for dim, rk in zip(dimensions, ranks):
        yield xe.TTTensor.random(dim.tolist(), rk.tolist())
def single_test(A):
    mirrored = lambda pos: A.degree()-1-pos
    name_d = "-".join(map(str, A.dimensions))
    name_r = "-".join(map(str, A.ranks()))
    name = "test_chop_{}_{}".format(name_d, name_r)
    def test_chop(self):
        L,l,e,r,R = xe.indices(5)
        for corePosition in range(A.degree()):
            Al,Ar = A.chop(corePosition)
            Ac = A.get_component(corePosition)
            res = xe.TensorNetwork()
            res(L^corePosition,e,R^mirrored(corePosition)) << Al(L&1,l) * Ac(l,e,r) * Ar(r,R&1)
            # norm(A - res)**2 == norm(A)**2 - 2*inner(A,res) + norm(res)**2
            nA = xe.frob_norm(A)
            nres = xe.frob_norm(res)
            inner = xe.Tensor()
            inner() << A(e&0) * res(e&0)
            self.assertLessEqual(nA**2 - 2*inner[0] + nres**2, 5e-5)
    return name, test_chop
def build_TestChop(seed, num_tests):
    random = np.random.RandomState(seed)
    odir = dict(single_test(t) for t in generate_random_tttensors(num_tests, random=random))
    return type("TestChop", (unittest.TestCase,), odir)
TestChop = build_TestChop(0, 100)
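The tolerance check inside `test_chop` relies on the expansion noted in the comment, norm(A - res)**2 == norm(A)**2 - 2*inner(A, res) + norm(res)**2, which lets the test avoid forming A - res as a tensor network. A self-contained numpy check of that identity (plain vectors, no xerus):

    import numpy as np

    a = np.random.randn(1000)
    b = np.random.randn(1000)
    lhs = np.linalg.norm(a - b)**2
    rhs = np.linalg.norm(a)**2 - 2*np.dot(a, b) + np.linalg.norm(b)**2
    assert abs(lhs - rhs) <= 1e-8 * max(1.0, abs(lhs))  # equal up to rounding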
......@@ -10,39 +10,39 @@ ranges = lambda *args: _iter.product(*[range(arg) for arg in args])
class TestExtendedTT(unittest.TestCase):
    def setUp(self):
        self.dimension = [4, 6, 8]
        self.ranks = [1, 8, 5, 1]

    def test_from_function_set_component(self):
        tt = xe.TTTensor(self.dimension)
        arrs = []
        for i in range(len(self.dimension)):
            shape = [self.ranks[i], self.dimension[i], self.ranks[i+1]]
            arr = np.random.randn(*shape)
            x = xe.Tensor.from_function(shape, lambda x: arr[tuple(x)])
            tt.set_component(i, x)
            arrs.append(arr)
        for i in range(len(self.dimension)):
            self.assertTrue(np.all(arrs[i] == tt.get_component(i).to_ndarray()))

    def test_eval_hermite(self):
        from numpy.polynomial.hermite_e import hermeval
        basis = xe.PolynomBasis.Hermite
        # TODO: check with numpy hermite...
        tt = xe.TTTensor(self.dimension)
        arrs = []
        for i in range(len(self.dimension)):
            shape = [self.ranks[i], self.dimension[i], self.ranks[i+1]]
            arr = np.random.randn(*shape)
            x = xe.Tensor.from_function(shape, lambda x: arr[tuple(x)])
            tt.set_component(i, x)
            arrs.append(arr)
        for (i, j, k) in ranges(*self.dimension):
            x = xe.uq_tt_evaluate(tt, [j, k], basis)
            x = x[[i]]
            # loc_extt = extt([i, j, k])
            # self.assertLessEqual(np.abs((loc_extt - loc_xett)/loc_extt), 1e-10)
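The intended reference computation for `test_eval_hermite` (see the commented-out assertion) would contract the stochastic modes of the TT with Hermite polynomials evaluated at the sample point. A sketch of such a reference in numpy, using a hypothetical helper `tt_eval_hermite` on raw component arrays rather than the xerus API:

    import numpy as np
    from numpy.polynomial.hermite_e import hermeval

    def tt_eval_hermite(cores, ys):
        # cores: list of ndarrays of shape (r_i, n_i, r_{i+1}); mode 0 is the
        # deterministic mode, all later modes are contracted with the
        # probabilists' Hermite polynomials He_0, ..., He_{n_i - 1} at ys.
        v = cores[0][0]                                 # (n_0, r_1), leading rank is 1
        for core, y in zip(cores[1:], ys):
            basis = hermeval(y, np.eye(core.shape[1]))  # [He_k(y)]_k
            v = v @ np.einsum('inj,n->ij', core, basis) # contract stochastic mode
        return v[:, 0]                                  # final TT rank is 1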
......@@ -6,26 +6,26 @@ import numpy as np
class TestNumpyInterface(unittest.TestCase):
    def setUp(self):
        self.dimension = [10, 15, 20]
        self.ranks = [1, 8, 4, 1]
    def test_to_ndarray(self):
        ten = xe.TTTensor.random(self.dimension, self.ranks[1:-1])
        for lia in range(ten.degree()):
            comp_ten = ten.get_component(lia)
            comp_dim = comp_ten.dimensions
            comp_nd = comp_ten.to_ndarray()
            for (i, j, k) in ranges(*comp_dim):
                self.assertEqual(comp_ten[[i,j,k]], comp_nd[i,j,k])
    def test_from_ndarray(self):
        arr = np.random.randn(50,50)
        ten = xe.Tensor.from_ndarray(arr)
        for lia in range(arr.shape[0]):
            for lib in range(arr.shape[1]):
                self.assertEqual(ten[[lia, lib]], arr[lia,lib])
if __name__ == '__main__':
    unittest.main()
......@@ -4,129 +4,129 @@ import numpy as np
from numpy.polynomial.legendre import legval
class TestReconstruction(unittest.TestCase):
    def test_small_reconstruction_weighted(self):
        # the function to approximate
        def fnc(x, y):
            return np.sin(2*np.pi*x)*(y[0] + 0.1*y[1]**2) + np.cos(2*np.pi*x)*y[1]
        x_dim = 100
        y_dim = 2
        n_samples = 10000
        n_test_samples = 100
        deg = 2
        basis = xe.PolynomBasis.Legendre
        x = np.linspace(0, 1, x_dim)
        def discretized_fnc(y):
            return fnc(x, y)
        path = os.path.join(os.path.dirname(__file__), "cm_samples.npz")
        cm_samples = np.load(path)
        nodes = cm_samples["samples"][:n_samples]
        values = [xe.Tensor.from_ndarray(discretized_fnc(y)) for y in nodes]
        vector = lambda x: xe.Tensor.from_ndarray(legval(x, np.eye(deg+1)))
        measurements = [[vector(ni) for ni in node] for node in nodes]
        weights = cm_samples["weights"][:n_samples]
        dimension = [x_dim] + [deg+1]*y_dim
        reco = xe.uq_ra_adf(measurements, values, weights, dimension, targeteps=1e-8, maxitr=70)
        #TODO: implement a xerus function: tt_evaluate(tt, pos, pos2meas) where pos2meas is a function pos2meas(int mode, int idx, pos) that calculates the idx-th basis function in the given mode
        #TODO: implement a xerus function: measurements(pos_vector, pos2meas)
        test_nodes = 2*np.random.rand(n_test_samples, y_dim)-1
        error = 0
        for y in test_nodes:
            res = xe.uq_tt_evaluate(reco, y, basis).to_ndarray()
            ref = discretized_fnc(y)
            error += np.linalg.norm(res - ref)**2 / np.linalg.norm(ref)**2
        error = np.sqrt(error) / n_test_samples
        self.assertLessEqual(error, 1e-3)

    def test_small_reconstruction_explicit(self):
        # the function to approximate
        def fnc(x, y):
            return np.sin(2*np.pi*x)*(y[0] + 0.1*y[1]**2) + np.cos(2*np.pi*x)*y[1]
        x_dim = 100
        y_dim = 2
        n_samples = 10000
        n_test_samples = 100
        deg = 2
        basis = xe.PolynomBasis.Legendre
        x = np.linspace(0, 1, x_dim)
        def discretized_fnc(y):
            return fnc(x, y)
        nodes = 2*np.random.rand(n_samples, y_dim)-1
        values = [xe.Tensor.from_ndarray(discretized_fnc(y)) for y in nodes]
        vector = lambda x: xe.Tensor.from_ndarray(legval(x, np.eye(deg+1)))
        measurements = [[vector(ni) for ni in node] for node in nodes]
        dimension = [x_dim] + [deg+1]*y_dim
        reco = xe.uq_ra_adf(measurements, values, dimension, targeteps=1e-8, maxitr=70)
        #TODO: implement a xerus function: tt_evaluate(tt, pos, pos2meas) where pos2meas is a function pos2meas(int mode, int idx, pos) that calculates the idx-th basis function in the given mode
        #TODO: implement a xerus function: measurements(pos_vector, pos2meas)
        test_nodes = 2*np.random.rand(n_test_samples, y_dim)-1
        error = 0
        for y in test_nodes:
            res = xe.uq_tt_evaluate(reco, y, basis).to_ndarray()
            ref = discretized_fnc(y)
            error += np.linalg.norm(res - ref) / np.linalg.norm(ref)
        error /= n_test_samples
        self.assertLessEqual(error, 1e-3)

    def test_small_reconstruction(self):
        # the function to approximate
        def fnc(x, y):
            return np.sin(2*np.pi*x)*(y[0] + 0.1*y[1]**2) + np.cos(2*np.pi*x)*y[1]
        x_dim = 100
        y_dim = 2
        n_samples = 10000
        n_test_samples = 100
        x = np.linspace(0, 1, x_dim)
        def discretized_fnc(y):
            return fnc(x, y)
        nodes = 2*np.random.rand(n_samples, y_dim)-1
        measurements = xe.UQMeasurementSet()
        for y in nodes:
            u = discretized_fnc(y)
            measurements.add(y, xe.Tensor.from_ndarray(u))
        basis = xe.PolynomBasis.Legendre
        dimension = [x_dim] + [3]*y_dim
        reco = xe.uq_ra_adf(measurements, basis, dimension, targeteps=1e-8, maxitr=70)
        test_nodes = 2*np.random.rand(n_test_samples, y_dim)-1
        error = 0
        for y in test_nodes:
            res = xe.uq_tt_evaluate(reco, y, basis).to_ndarray()
            ref = discretized_fnc(y)
            error += np.linalg.norm(res - ref) / np.linalg.norm(ref)
        error /= n_test_samples
        self.assertLessEqual(error, 1e-3)

    # def test_large_reconstruction(self):
    #     data = np.load('samples.npz')
    #     measurements = xe.UQMeasurementSet()
    #     for y,u in zip(data['nodes'], data['values']):
    #         measurements.add(y, xe.Tensor.from_ndarray(u))
    #     basis = xe.PolynomBasis.Legendre
    #     dimension = [data['values'].shape[1]] + [8]*data['nodes'].shape[1]
    #     reco = xe.uq_ra_adf(measurements, basis, dimension, targeteps=1e-8, maxitr=1000)
    #     #TODO: just assert that the residuum is below 1e-2
    #     ref = xe.load("reconstruction.xrs")
    #     self.assertLessEqual(xe.frob_norm(reco - ref), 1e-8)
if __name__ == '__main__':
    unittest.main()
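A note on the `vector` helper above: `legval(x, np.eye(deg+1))` evaluates all Legendre polynomials P_0, ..., P_deg at x in a single call, because the k-th column of the identity is the coefficient vector of P_k. A quick standalone check:

    import numpy as np
    from numpy.polynomial.legendre import legval

    print(legval(0.5, np.eye(3)))  # [1.0, 0.5, -0.125] = [P_0(0.5), P_1(0.5), P_2(0.5)]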
......@@ -31,85 +31,85 @@
namespace xerus {
    TTTensor randomTTSVD(const Tensor& _x, const std::vector<size_t>& _ranks) {
        const size_t d = _x.order();
        REQUIRE(d == _ranks.size()+1, "Inconsistent order vs. number of provided ranks.");
        TTTensor u(d);
        Tensor a;
        Tensor b = _x;
        for(size_t j = d-1; j > 0; --j) {
            const size_t contractSize = misc::product(b.dimensions, 0, j);
            const size_t staySize = misc::product(b.dimensions, j, b.dimensions.size());
            const size_t s = std::min(_ranks[j-1], std::min(contractSize, staySize)+1);
            if(b.is_sparse()) {
                std::map<size_t, std::vector<value_t>> usedG;
                std::vector<size_t> outDims({s});
                outDims.insert(outDims.end(), b.dimensions.cbegin()+j, b.dimensions.cend());
                a = Tensor(outDims, Tensor::Representation::Sparse, Tensor::Initialisation::Zero);
                const auto& data = b.get_sparse_data();
                for(const auto& entry : data) {
                    const size_t pos = entry.first/staySize;
                    const size_t outPos = entry.first%staySize;
                    auto& gEntry = usedG[pos];
                    if(gEntry.empty()) {
                        gEntry.reserve(s);
                        for(size_t k = 0; k < s; ++k) {
                            gEntry.push_back(misc::defaultNormalDistribution(misc::randomEngine));
                        }
                    }
                    for(size_t k = 0; k < s; ++k) {
                        a[k*staySize+outPos] += gEntry[k]*entry.second;
                    }
                }
            } else {
                std::vector<size_t> gDims({s});
                gDims.insert(gDims.end(), b.dimensions.cbegin(), b.dimensions.cbegin()+j);
                const Tensor g = Tensor::random(gDims, misc::defaultNormalDistribution, misc::randomEngine);
                contract(a, g, false, b, false, j);
            }
            Tensor R, Q;
            calculate_cq(R, Q, a, 1);
            if(j == d-1) {
                contract(b, b, false, Q, true, 1);
                Q.reinterpret_dimensions(Q.dimensions | std::vector<size_t>({1}));
                u.set_component(j, Q);
            } else {
                contract(b, b, false, Q, true, 2);
                u.set_component(j, Q);
            }
        }
        b.reinterpret_dimensions(std::vector<size_t>({1}) | b.dimensions);
        u.set_component(0, b);
        return u;
    }
    TTTensor randomTTSVD(const Tensor& _x, const std::vector<size_t>& _ranks, const std::vector<size_t>& _oversampling) {
        REQUIRE(_ranks.size() == _oversampling.size(), "Inconsistent rank/oversampling sizes.");
        std::vector<size_t> sampRanks = _ranks;
        for(size_t i = 0; i < _ranks.size(); ++i) {
            sampRanks[i] += _oversampling[i];
        }
        auto ttX = randomTTSVD(_x, sampRanks);
        ttX.round(_ranks);
        return ttX;
    }
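For orientation, the dense branch of `randomTTSVD` above performs one randomized range-finding step per sweep position: sketch the leading modes of the current tensor with a Gaussian matrix, orthonormalize the result, and contract the orthonormal factor back to shrink the tensor. A rough numpy sketch of that single step (standalone illustration of the technique, not the xerus API; the sparse branch computes the same product while materializing rows of the Gaussian matrix only for slices that actually hold nonzero entries):

    import numpy as np

    def random_ttsvd_step(B, j, s):
        # B: dense ndarray, split after mode j; s: sketch size (rank + oversampling)
        left = int(np.prod(B.shape[:j]))       # "contractSize"
        right = int(np.prod(B.shape[j:]))      # "staySize"
        G = np.random.randn(s, left)           # Gaussian sketching matrix
        A = G @ B.reshape(left, right)         # (s, right) sketch of the row space
        Qt, _ = np.linalg.qr(A.T)              # A = R @ Q with row-orthonormal Q,
        Q = Qt.T                               # as in calculate_cq(R, Q, a, 1)
        B_next = B.reshape(left, right) @ Q.T  # (left, s') tensor for the next step
        return B_next.reshape(B.shape[:j] + (Q.shape[0],)), Q  # Q becomes the component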