Commit df47de0f authored by Michael Goette's avatar Michael Goette

merged development

parents a078d4fa bb32430a
Pipeline #1051 passed with stages
in 7 minutes and 34 seconds
......@@ -7,7 +7,8 @@ help:
@printf "Possible make targets are:\n \
\t\tshared \t\t -- Build xerus as a shared library.\n \
\t\tstatic \t\t -- Build xerus as a static library.\n \
\t\tpython \t\t -- Build the xerus python wrappers.\n \
\t\tpython2 \t\t -- Build the xerus python2 wrappers.\n \
\t\tpython3 \t\t -- Build the xerus python3 wrappers.\n \
\t\tdoc \t\t -- Build the html documentation for the xerus library.\n \
\t\tinstall \t -- Install the shared library and header files (may require root).\n \
\t\ttest \t\t -- Build and run the xerus unit tests.\n \
......@@ -153,7 +154,8 @@ build/libxerus.so: $(MINIMAL_DEPS) $(XERUS_SOURCES) build/libxerus_misc.so
$(CXX) -shared -fPIC -Wl,-soname,libxerus.so $(FLAGS) -I include $(XERUS_SOURCES) -L ./build/ -Wl,--as-needed -lxerus_misc $(SUITESPARSE) $(LAPACK_LIBRARIES) $(ARPACK_LIBRARIES) $(BLAS_LIBRARIES) -o build/libxerus.so
python: build/python2/xerus.so build/python3/xerus.so
python2: build/python2/xerus.so
python3: build/python3/xerus.so
build/python2/xerus.so: $(MINIMAL_DEPS) $(PYTHON_SOURCES) build/libxerus.so
mkdir -p $(dir $@)
......@@ -231,8 +233,11 @@ test: $(TEST_NAME)
./$(TEST_NAME) all
test_python: # build/libxerus.so build/python3/xerus.so
@export PYTHONPATH=build/python3:${PYTHONPATH}; export LD_LIBRARY_PATH=build:${LD_LIBRARY_PATH}; pytest src/pyTests
test_python2: # build/libxerus.so build/python2/xerus.so
@PYTHONPATH=build/python2:${PYTHONPATH} LD_LIBRARY_PATH=build:${LD_LIBRARY_PATH} $(PYTEST2) src/pyTests
test_python3: # build/libxerus.so build/python3/xerus.so
@PYTHONPATH=build/python3:${PYTHONPATH} LD_LIBRARY_PATH=build:${LD_LIBRARY_PATH} $(PYTEST3) src/pyTests
fullTest: $(TUTORIALS) $(TEST_NAME)
......@@ -240,8 +245,7 @@ fullTest: $(TUTORIALS) $(TEST_NAME)
./$(TEST_NAME) all
.FORCE:
doc: .FORCE doc/parseDoxytags doc/findDoxytag
doc:
make -C doc doc
......
......@@ -3,10 +3,13 @@
#=================================================================================================
# Xerus can be compiled either with G++ or the Clang++ frontend of the LLVM.
# Set the CXX variable to the one you want to use.
# If you want to use OpenMP, make sure that your compiler supports OpenMP 4.0.
CXX = g++
# CXX = clang++
PYTHON2_CONFIG = python2-config
PYTHON3_CONFIG = python3-config
PYTHON2_TEST = pytest2
PYTHON3_TEST = pytest3
#=================================================================================================
# C++ Version
......
.PHONY: help doc clean serve
# ------------------------------------------------------------------------------------------------------
# Default rule should be the help message
# ------------------------------------------------------------------------------------------------------
......@@ -8,8 +10,7 @@ help:
\t\tserve \t\t -- Build the html documentation for the xerus library and offer it via 'jekyll serve'.\n \
\t\tclean \t\t -- Remove all documentation files.\n"
.FORCE:
doc: .FORCE parseDoxytags findDoxytag
doc: parseDoxytags findDoxytag
-mkdir html
doxygen doxygen/Doxyfile
./parseDoxytags
......@@ -20,7 +21,7 @@ clean:
-rm -f parseDoxytags findDoxytag
-rm -f xerus.tags xerus.tagfile
serve: .FORCE parseDoxytags findDoxytag
serve: parseDoxytags findDoxytag
-mkdir html
doxygen doxygen/Doxyfile
./parseDoxytags
......@@ -31,7 +32,7 @@ include ../makeIncludes/general.mk
include ../makeIncludes/warnings.mk
include ../makeIncludes/optimization.mk
FLAGS = $(strip $(WARNINGS) $(OPTIMIZE) $(OTHER))
FLAGS = $(strip $(WARNINGS) $(OPTIMIZE) $(OTHER)) -std=c++11
parseDoxytags: ../src/docHelper/parseDoxytags.cpp
$(CXX) $(FLAGS) ../src/docHelper/parseDoxytags.cpp -o parseDoxytags
......
#!/bin/bash
# Rebuild the firemarmot docker images from scratch.
# Removes any stale images first (-f: ignore errors if they do not exist),
# then builds the base xerus image and the top-level image on top of it.

# Directory containing this script and the Dockerfiles.
# Quoted throughout so paths containing spaces do not word-split.
DIRNAME=$(dirname "$0")

docker image rm -f firemarmot/xerus
docker image rm -f firemarmot

# The base image must be built first; the top-level Dockerfile builds on it.
docker build -t firemarmot/xerus "$DIRNAME/xerus"
docker build -t firemarmot "$DIRNAME"
......@@ -37,5 +37,9 @@ namespace xerus { namespace uq {
TTTensor uq_ra_adf(const UQMeasurementSet& _measurments, const PolynomBasis _basisType, const std::vector<size_t>& _dimensions, const double _targetEps = 1e-8, const size_t _maxItr = 0);
TTTensor uq_ra_adf(const std::vector<std::vector<Tensor>>& _positions, const std::vector<Tensor>& _solutions, const std::vector<size_t>& _dimensions, const double _targetEps = 1e-8, const size_t _maxItr = 0);
TTTensor uq_ra_adf(const std::vector<std::vector<Tensor>>& _positions, const std::vector<Tensor>& _solutions, const std::vector<double>& _weights, const std::vector<size_t>& _dimensions, const double _targetEps = 1e-8, const size_t _maxItr = 0);
TTTensor uq_ra_adf_iv(TTTensor& _x, const UQMeasurementSet& _measurments, const PolynomBasis _basisType, const double _targetEps = 1e-8, const size_t _maxItr = 0);
}}
This diff is collapsed.
# np.random.seed(1337)
# def build_TestNumpyInterface(seed, num, ext):
# rsg = np.random.RandomState(seed)
# mnkls = rsg.randint(1, 20, (num,4))
# def make_test_to_ndarray(mn, kl):
# def test(self):
# ls = rsg.randn(ext, *mn)
# rs = rsg.randn(ext, *kl)
# for l,r in zip(ls,rs):
# lr = np.kron(l,r)
# L,R = project(lr, mn, kl)
# diff = norm(lr - np.kron(L,R))
# self.assertLessEqual(diff, 1e-12)
# return test
# def make_test_from_ndarray(mn, kl):
# def test(self):
# ls = rsg.randn(ext, *mn)
# rs = rsg.randn(ext, *kl)
# for l,r in zip(ls,rs):
# lr = np.kron(l,r)
# L,R = project(lr, mn, kl)
# diff = norm(lr - np.kron(L,R))
# self.assertLessEqual(diff, 1e-12)
# return test
# odir = dict()
# for mnkl in mnkls:
# name = "test_random_{}x{}_{}x{}".format(*mnkl)
# test = make_test(mnkl[:2], mnkl[2:])
# odir[name] = test
# return type("TestRandomKroneckerSplitting", (unittest.TestCase,), odir)
# TestRandomKroneckerSplitting = build_TestNumpyInterface(0, 100, 4)
from __future__ import division
import unittest
import itertools as _iter
import xerus as xe
import numpy as np
# Fixed seed so the randomized tests below are reproducible across runs.
np.random.seed(1337)
# Cartesian product of range(a) for each argument a,
# e.g. ranges(2, 3) yields (0,0), (0,1), (0,2), (1,0), (1,1), (1,2).
ranges = lambda *args: _iter.product(*[range(arg) for arg in args])
class TestExtendedTT(unittest.TestCase):
    """Tests for the extended TTTensor functionality of the xerus python bindings."""

    def setUp(self):
        # Small 3-mode tensor with fixed TT-ranks (boundary ranks are 1).
        self.dimension = [4, 6, 8]
        self.ranks = [1, 8, 5, 1]

    def test_from_function_set_component(self):
        """Components written via Tensor.from_function must round-trip unchanged."""
        tt = xe.TTTensor(self.dimension)
        references = []
        for mode in range(len(self.dimension)):
            shape = [self.ranks[mode], self.dimension[mode], self.ranks[mode + 1]]
            data = np.random.randn(*shape)
            component = xe.Tensor.from_function(shape, lambda idx: data[tuple(idx)])
            tt.set_component(mode, component)
            references.append(data)
        for mode, data in enumerate(references):
            self.assertTrue(np.all(data == tt.get_component(mode).to_ndarray()))

    def test_eval_hermite(self):
        """Smoke test: evaluate a random TT tensor in the Hermite polynomial basis."""
        from numpy.polynomial.hermite_e import hermeval
        basis = xe.PolynomBasis.Hermite
        # TODO: check with numpy hermite...
        tt = xe.TTTensor(self.dimension)
        references = []
        for mode in range(len(self.dimension)):
            shape = [self.ranks[mode], self.dimension[mode], self.ranks[mode + 1]]
            data = np.random.randn(*shape)
            component = xe.Tensor.from_function(shape, lambda idx: data[tuple(idx)])
            tt.set_component(mode, component)
            references.append(data)
        # Evaluate at every parameter index; no assertion yet (see TODO above).
        for (i, j, k) in ranges(*self.dimension):
            evaluated = xe.uq_tt_evaluate(tt, [j, k], basis)
            evaluated = evaluated[[i]]
            # loc_extt = extt([i, j, k])
            # self.assertLessEqual(np.abs((loc_extt - loc_xett)/loc_extt), 1e-10)
import os, unittest
import xerus as xe
import numpy as np
from numpy.polynomial.legendre import legval
class TestReconstruction(unittest.TestCase):
    """Tests for the xerus uq_ra_adf reconstruction overloads.

    Each test approximates the same parametric function
        f(x, y) = sin(2*pi*x)*(y[0] + 0.1*y[1]**2) + cos(2*pi*x)*y[1]
    discretized on a uniform grid in x, from point samples in the parameter y,
    and checks the mean relative reconstruction error on fresh test samples.
    """

    def test_small_reconstruction_weighted(self):
        """Reconstruction from precomputed samples with explicit per-sample weights."""
        # the function to approximate
        def fnc(x, y):
            return np.sin(2*np.pi*x)*(y[0] + 0.1*y[1]**2) + np.cos(2*np.pi*x)*y[1]

        x_dim = 100
        y_dim = 2
        n_samples = 10000
        n_test_samples = 100
        deg = 2
        basis = xe.PolynomBasis.Legendre
        x = np.linspace(0, 1, x_dim)
        def discretized_fnc(y):
            return fnc(x, y)

        # Sample nodes and their weights are shipped with the test suite
        # (presumably quadrature-like samples; see cm_samples.npz).
        path = os.path.join(os.path.dirname(__file__), "cm_samples.npz")
        cm_samples = np.load(path)
        nodes = cm_samples["samples"][:n_samples]
        values = [xe.Tensor.from_ndarray(discretized_fnc(y)) for y in nodes]
        # Legendre basis evaluations (degrees 0..deg) at a scalar sample point:
        # legval with the identity matrix returns one value per basis polynomial.
        vector = lambda x: xe.Tensor.from_ndarray(legval(x, np.eye(deg+1)))
        measurements = [[vector(ni) for ni in node] for node in nodes]
        weights = cm_samples["weights"][:n_samples]
        dimension = [x_dim] + [deg+1]*y_dim
        reco = xe.uq_ra_adf(measurements, values, weights, dimension, targeteps=1e-8, maxitr=70)
        #TODO: implement a xerus function: tt_evaluate(tt, pos, pos2meas) where pos2meas is a function pos2meas(int mode, int idx, pos) that calculates the idx-th basis function in the given mode
        #TODO: implement a xerus function: measurements(pos_vector, pos2meas)

        # Root-mean-square relative error over fresh uniform test samples in [-1, 1).
        test_nodes = 2*np.random.rand(n_test_samples, y_dim)-1
        error = 0
        for y in test_nodes:
            res = xe.uq_tt_evaluate(reco, y, basis).to_ndarray()
            ref = discretized_fnc(y)
            error += np.linalg.norm(res - ref)**2 / np.linalg.norm(ref)**2
        error = np.sqrt(error) / n_test_samples
        self.assertLessEqual(error, 1e-3)

    def test_small_reconstruction_explicit(self):
        """Reconstruction from explicit (position, solution) measurement lists."""
        # the function to approximate
        def fnc(x, y):
            return np.sin(2*np.pi*x)*(y[0] + 0.1*y[1]**2) + np.cos(2*np.pi*x)*y[1]

        x_dim = 100
        y_dim = 2
        n_samples = 10000
        n_test_samples = 100
        deg = 2
        basis = xe.PolynomBasis.Legendre
        x = np.linspace(0, 1, x_dim)
        def discretized_fnc(y):
            return fnc(x, y)

        # Uniform random sample nodes in [-1, 1)^y_dim.
        nodes = 2*np.random.rand(n_samples, y_dim)-1
        values = [xe.Tensor.from_ndarray(discretized_fnc(y)) for y in nodes]
        # Legendre basis evaluations (degrees 0..deg) at a scalar sample point.
        vector = lambda x: xe.Tensor.from_ndarray(legval(x, np.eye(deg+1)))
        measurements = [[vector(ni) for ni in node] for node in nodes]
        dimension = [x_dim] + [deg+1]*y_dim
        reco = xe.uq_ra_adf(measurements, values, dimension, targeteps=1e-8, maxitr=70)
        #TODO: implement a xerus function: tt_evaluate(tt, pos, pos2meas) where pos2meas is a function pos2meas(int mode, int idx, pos) that calculates the idx-th basis function in the given mode
        #TODO: implement a xerus function: measurements(pos_vector, pos2meas)

        # Mean relative error over fresh uniform test samples.
        test_nodes = 2*np.random.rand(n_test_samples, y_dim)-1
        error = 0
        for y in test_nodes:
            res = xe.uq_tt_evaluate(reco, y, basis).to_ndarray()
            ref = discretized_fnc(y)
            error += np.linalg.norm(res - ref) / np.linalg.norm(ref)
        error /= n_test_samples
        self.assertLessEqual(error, 1e-3)

    def test_small_reconstruction(self):
        """Reconstruction via the UQMeasurementSet + PolynomBasis interface."""
        # the function to approximate
        def fnc(x, y):
            return np.sin(2*np.pi*x)*(y[0] + 0.1*y[1]**2) + np.cos(2*np.pi*x)*y[1]

        x_dim = 100
        y_dim = 2
        n_samples = 10000
        n_test_samples = 100
        x = np.linspace(0, 1, x_dim)
        def discretized_fnc(y):
            return fnc(x, y)

        # Collect (parameter, solution) pairs in a UQMeasurementSet; the basis
        # evaluation is done internally by xerus in this variant.
        nodes = 2*np.random.rand(n_samples, y_dim)-1
        measurements = xe.UQMeasurementSet()
        for y in nodes:
            u = discretized_fnc(y)
            measurements.add(y, xe.Tensor.from_ndarray(u))
        basis = xe.PolynomBasis.Legendre
        dimension = [x_dim] + [3]*y_dim
        reco = xe.uq_ra_adf(measurements, basis, dimension, targeteps=1e-8, maxitr=70)

        # Mean relative error over fresh uniform test samples.
        test_nodes = 2*np.random.rand(n_test_samples, y_dim)-1
        error = 0
        for y in test_nodes:
            res = xe.uq_tt_evaluate(reco, y, basis).to_ndarray()
            ref = discretized_fnc(y)
            error += np.linalg.norm(res - ref) / np.linalg.norm(ref)
        error /= n_test_samples
        self.assertLessEqual(error, 1e-3)

    # Disabled: requires the large external sample file 'samples.npz' and a
    # precomputed reference 'reconstruction.xrs'.
    # def test_large_reconstruction(self):
    # data = np.load('samples.npz')
    # measurements = xe.UQMeasurementSet()
    # for y,u in zip(data['nodes'], data['values']):
    # measurements.add(y, xe.Tensor.from_ndarray(u))
    # basis = xe.PolynomBasis.Legendre
    # dimension = [data['values'].shape[1]] + [8]*data['nodes'].shape[1]
    # reco = xe.uq_ra_adf(measurements, basis, dimension, targeteps=1e-8, maxitr=1000)
    # #TODO: just assert that the residuum is below 1e-2
    # ref = xe.load("reconstruction.xrs")
    # self.assertLessEqual(xe.frob_norm(reco - ref), 1e-8)
# Allow running this test file directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
This diff is collapsed.
......@@ -55,6 +55,7 @@ namespace xerus { namespace uq { namespace impl_uqRaAdf {
const std::vector<std::vector<Tensor>> positions;
const std::vector<Tensor>& solutions;
const std::vector<double> weights;
TTTensor& outX;
......@@ -94,6 +95,26 @@ namespace xerus { namespace uq { namespace impl_uqRaAdf {
}
static std::vector<std::vector<Tensor>> transpose_positions(const TTTensor& _x, const std::vector<std::vector<Tensor>>& _positions, const std::vector<Tensor>& _solutions) {
REQUIRE(_positions.size() == _solutions.size(), "Incompatible positions and solutions vector");
for(size_t sample=0; sample < _positions.size(); ++sample) {
REQUIRE(_positions[sample].size() == _x.degree()-1, "Invalid measurement");
}
std::vector<std::vector<Tensor>> positions(_x.degree());
for(size_t corePosition=1; corePosition < _x.degree(); ++corePosition) {
positions[corePosition].reserve(_positions.size());
for(size_t sample=0; sample < _positions.size(); ++sample) {
REQUIRE(_positions[sample][corePosition-1].dimensions.size() == 1, "Invalid measurement component");
REQUIRE(_positions[sample][corePosition-1].size == _x.dimensions[corePosition], "Invalid measurement component");
positions[corePosition].push_back(_positions[sample][corePosition-1]);
}
}
return positions;
}
void shuffle_sets() {
sets = std::vector<std::vector<size_t>>(P);
controlSet.clear();
......@@ -142,6 +163,7 @@ namespace xerus { namespace uq { namespace impl_uqRaAdf {
maxIterations(_maxItr),
positions(create_positions(_x, _basisType, _measurments.parameterVectors)),
solutions(_measurments.solutions),
weights(std::vector<double>(N, 1.0)),
outX(_x),
x(_x, 0, P),
rightStack(d, std::vector<Tensor>(N)),
......@@ -156,6 +178,49 @@ namespace xerus { namespace uq { namespace impl_uqRaAdf {
}
/// Construct a solver from explicit per-sample measurement positions.
/// All samples receive the uniform weight 1.0 (unweighted variant).
/// @param _x initial guess; also the output — the result is written back via outX.
/// @param _positions sample-major measurement vectors, transposed to mode-major storage.
/// @param _solutions one solution tensor per sample.
/// @param _maxItr maximum number of iterations (0 presumably means unbounded — TODO confirm).
/// @param _targetEps residual at which the iteration stops.
/// @param _initalRankEps initial rank-truncation threshold.
InternalSolver(TTTensor& _x, const std::vector<std::vector<Tensor>>& _positions, const std::vector<Tensor>& _solutions, const size_t _maxItr, const double _targetEps, const double _initalRankEps) :
	N(_solutions.size()),                    // number of samples
	d(_x.degree()),                          // number of TT modes
	targetResidual(_targetEps),
	maxIterations(_maxItr),
	positions(transpose_positions(_x, _positions, _solutions)),
	solutions(_solutions),
	weights(std::vector<double>(N, 1.0)),    // every sample counts equally
	outX(_x),
	x(_x, 0, P),                             // P-fold working copy (P is a class constant outside this view)
	rightStack(d, std::vector<Tensor>(N)),   // per-mode, per-sample contraction stacks
	leftIsStack(d, std::vector<Tensor>(N)),
	leftOughtStack(d, std::vector<Tensor>(N)),
	rankEps(_initalRankEps),
	prevRanks(tracking+1, _x.ranks())        // rank history for adaptivity tracking
{
	LOG(uqADF, "Set size: " << N);
	shuffle_sets();  // partition the samples into the P optimization sets
}
/// Construct a solver from explicit per-sample measurement positions with
/// individual per-sample weights (used in the residual and norm computations).
/// @param _x initial guess; also the output — the result is written back via outX.
/// @param _positions sample-major measurement vectors, transposed to mode-major storage.
/// @param _solutions one solution tensor per sample.
/// @param _weights one weight per sample; assumed to have the same length as _solutions — TODO confirm callers validate this.
/// @param _maxItr maximum number of iterations.
/// @param _targetEps residual at which the iteration stops.
/// @param _initalRankEps initial rank-truncation threshold.
InternalSolver(TTTensor& _x, const std::vector<std::vector<Tensor>>& _positions, const std::vector<Tensor>& _solutions, const std::vector<double>& _weights, const size_t _maxItr, const double _targetEps, const double _initalRankEps) :
	N(_solutions.size()),                    // number of samples
	d(_x.degree()),                          // number of TT modes
	targetResidual(_targetEps),
	maxIterations(_maxItr),
	positions(transpose_positions(_x, _positions, _solutions)),
	solutions(_solutions),
	weights(_weights),                       // caller-supplied per-sample weights
	outX(_x),
	x(_x, 0, P),                             // P-fold working copy (P is a class constant outside this view)
	rightStack(d, std::vector<Tensor>(N)),   // per-mode, per-sample contraction stacks
	leftIsStack(d, std::vector<Tensor>(N)),
	leftOughtStack(d, std::vector<Tensor>(N)),
	rankEps(_initalRankEps),
	prevRanks(tracking+1, _x.ranks())        // rank history for adaptivity tracking
{
	LOG(uqADF, "Set size: " << N);
	shuffle_sets();  // partition the samples into the P optimization sets
}
void calc_left_stack(const size_t _position) {
REQUIRE(_position+1 < d, "Invalid corePosition");
......@@ -219,7 +284,9 @@ namespace xerus { namespace uq { namespace impl_uqRaAdf {
if(_corePosition > 0) {
const Tensor shuffledX = reshuffle(x.get_core(_setId), {1, 0, 2});
#pragma omp parallel for firstprivate(dyadComp, tmp)
//TODO: schedule, threadprivate(dyadComp, tmp)
#pragma omp declare reduction(+: Tensor: omp_out += omp_in) initializer(omp_priv = Tensor(omp_orig.dimensions))
#pragma omp parallel for reduction(+: delta) firstprivate(_corePosition, _setId, dyadComp, tmp, shuffledX) default(none)
for(size_t jIdx = 0; jIdx < sets[_setId].size(); ++jIdx) {
const size_t j = sets[_setId][jIdx];
......@@ -250,22 +317,22 @@ namespace xerus { namespace uq { namespace impl_uqRaAdf {
// Combine with ought part
contract(dyadComp, isPart - leftOughtStack[_corePosition-1][j], dyadicPart, 0);
#pragma omp critical
{ delta += dyadComp; }
delta += weights[j] * dyadComp;
}
} else { // _corePosition == 0
Tensor shuffledX = x.get_core(_setId);
shuffledX.reinterpret_dimensions({shuffledX.dimensions[1], shuffledX.dimensions[2]});
#pragma omp parallel for firstprivate(dyadComp, tmp)
//TODO: schedule, threadprivate(dyadComp, tmp)
#pragma omp declare reduction(+: Tensor: omp_out += omp_in) initializer(omp_priv = Tensor(omp_orig.dimensions))
#pragma omp parallel for reduction(+: delta) firstprivate(_corePosition, _setId, dyadComp, tmp, shuffledX) default(none)
for(size_t jIdx = 0; jIdx < sets[_setId].size(); ++jIdx) {
const size_t j = sets[_setId][jIdx];
contract(dyadComp, shuffledX, rightStack[_corePosition+1][j], 1);
contract(dyadComp, dyadComp - solutions[j], rightStack[_corePosition+1][j], 0);
dyadComp.reinterpret_dimensions({1, dyadComp.dimensions[0], dyadComp.dimensions[1]});
#pragma omp critical
{ delta += dyadComp; }
delta += weights[j] * dyadComp;
}
}
......@@ -283,7 +350,7 @@ namespace xerus { namespace uq { namespace impl_uqRaAdf {
const size_t j = sets[_setId][jIdx];
contract(tmp, _delta, rightStack[1][j], 1);
const double normPart = misc::sqr(frob_norm(tmp));
norm += normPart;
norm += weights[j] * normPart;
}
} else { // _corePosition > 0
Tensor shuffledDelta = reshuffle(_delta, {1, 0, 2});
......@@ -313,7 +380,7 @@ namespace xerus { namespace uq { namespace impl_uqRaAdf {
}
REQUIRE(tmp.size == 1, "IE");
norm += tmp[0];
norm += weights[j] * tmp[0];
}
}
......@@ -518,6 +585,52 @@ namespace xerus { namespace uq { namespace impl_uqRaAdf {
return x;
}
/// Rank-adaptive ADF reconstruction from explicit measurement positions
/// (unweighted variant).
///
/// Forwards to the weighted overload (declared in the header) with every
/// weight set to 1.0. This is behaviorally identical to the previous
/// duplicated implementation: the unweighted InternalSolver constructor
/// also initializes all weights to 1.0.
///
/// @param _positions  sample-major measurement vectors, one per parametric mode.
/// @param _solutions  one solution tensor per sample.
/// @param _dimensions TT dimensions; front() is the physical dimension.
/// @param _targetEps  residual at which the iteration stops.
/// @param _maxItr     maximum number of iterations.
/// @return the reconstructed TTTensor.
TTTensor uq_ra_adf(const std::vector<std::vector<Tensor>>& _positions, const std::vector<Tensor>& _solutions, const std::vector<size_t>& _dimensions, const double _targetEps, const size_t _maxItr) {
	return uq_ra_adf(_positions, _solutions, std::vector<double>(_solutions.size(), 1.0), _dimensions, _targetEps, _maxItr);
}
/// Rank-adaptive ADF reconstruction from explicit measurement positions with
/// per-sample weights. Starts from the sample mean (rank-one TT) and lets the
/// internal solver adapt the ranks.
///
/// @param _positions  sample-major measurement vectors, one per parametric mode.
/// @param _solutions  one solution tensor per sample.
/// @param _weights    one weight per sample; applied in the residual and norm sums.
/// @param _dimensions TT dimensions; front() is the physical dimension.
/// @param _targetEps  residual at which the iteration stops.
/// @param _maxItr     maximum number of iterations.
/// @return the reconstructed TTTensor.
TTTensor uq_ra_adf(const std::vector<std::vector<Tensor>>& _positions, const std::vector<Tensor>& _solutions, const std::vector<double>& _weights, const std::vector<size_t>& _dimensions, const double _targetEps, const size_t _maxItr) {
	REQUIRE(_positions.size() == _solutions.size(), "Invalid measurments");
	// The solver indexes weights[j] for every sample j; a short vector would
	// read out of bounds, so validate the length up front.
	REQUIRE(_weights.size() == _solutions.size(), "Invalid weights");
	REQUIRE(_dimensions.front() == _solutions.front().size, "Inconsitent spacial dimension");

	LOG(UQ, "Calculating Average as start.");

	TTTensor x(_dimensions);
	Tensor mean = sample_mean(_solutions);

	// Set mean: first core carries the average solution, all further cores are
	// dirac vectors, i.e. x evaluates to the mean for every parameter.
	mean.reinterpret_dimensions({1, x.dimensions[0], 1});
	x.set_component(0, mean);
	for(size_t k = 1; k < x.degree(); ++k) {
		x.set_component(k, Tensor::dirac({1, x.dimensions[k], 1}, 0));
	}
	x.assume_core_position(0);

	impl_uqRaAdf::InternalSolver<2> solver(x, _positions, _solutions, _weights, _maxItr, _targetEps, 1e-1);
	solver.solve();
	return x;
}
TTTensor uq_ra_adf_iv(TTTensor& _x, const UQMeasurementSet& _measurments, const PolynomBasis _basisType, const double _targetEps, const size_t _maxItr) {
REQUIRE(_measurments.parameterVectors.size() == _measurments.solutions.size(), "Invalid measurments");
REQUIRE(_x.dimensions.front() == _measurments.solutions.front().size, "Inconsitent spacial dimension");
......
This diff is collapsed.
......@@ -97,7 +97,14 @@ void expose_recoveryAlgorithms() {
.def("set_measuredValue", +[](RankOneMeasurementSet &_this, size_t _i, value_t _val){
_this.measuredValues[_i] = _val;
})
.def("add", &RankOneMeasurementSet::add)
/* void add(const std::vector<Tensor>& _position, const value_t _measuredValue); */
.def("add", +[](RankOneMeasurementSet& _self, const std::vector<Tensor>& _position, const value_t _measuredValue) {
_self.add(_position, _measuredValue);
})
/* void add(const std::vector<Tensor>& _position, const value_t _measuredValue, const value_t _weight); */
.def("add", +[](RankOneMeasurementSet& _self, const std::vector<Tensor>& _position, const value_t _measuredValue, const value_t _weight) {
_self.add(_position, _measuredValue, _weight);
})
.def("size", &RankOneMeasurementSet::size)
.def("degree", &RankOneMeasurementSet::degree)
.def("frob_norm", &RankOneMeasurementSet::frob_norm)
......@@ -165,6 +172,8 @@ void expose_recoveryAlgorithms() {
VECTOR_TO_PY(std::vector<double>, "DoubleVectorVector");
py_pair<std::vector<std::vector<double>>, std::vector<Tensor>>();
VECTOR_TO_PY(std::vector<Tensor>, "TensorVectorVector");
//def("uq_adf", +[](const UQMeasurementSet& _measurments, const TTTensor& _guess) {
// return uq_adf(_measurments, _guess);
//}, ( arg("measurments"), arg("guess")) );
......@@ -174,6 +183,16 @@ void expose_recoveryAlgorithms() {
}, (arg("measurements"), arg("polynombasis"), arg("dimensions"), arg("targeteps"), arg("maxitr"))
);
def("uq_ra_adf", +[](const std::vector<std::vector<Tensor>>& _positions, const std::vector<Tensor>& _solutions, const std::vector<size_t>& _dimensions, const double _targetEps, const size_t _maxItr){
return uq::uq_ra_adf(_positions, _solutions, _dimensions, _targetEps, _maxItr);
}, (arg("positions"), arg("solutions"), arg("dimensions"), arg("targeteps"), arg("maxitr"))
);
def("uq_ra_adf", +[](const std::vector<std::vector<Tensor>>& _positions, const std::vector<Tensor>& _solutions, const std::vector<double>& _weights, const std::vector<size_t>& _dimensions, const double _targetEps, const size_t _maxItr){
return uq::uq_ra_adf(_positions, _solutions, _weights, _dimensions, _targetEps, _maxItr);
}, (arg("positions"), arg("solutions"), arg("weights"), arg("dimensions"), arg("targeteps"), arg("maxitr"))
);
def("uq_ra_adf_iv", +[](TTTensor& _x, const uq::UQMeasurementSet& _measurements, const uq::PolynomBasis _basisType, const double _targetEps, const size_t _maxItr){
return uq::uq_ra_adf_iv(_x, _measurements, _basisType, _targetEps, _maxItr);
}, (arg("initial guess"), arg("measurements"), arg("polynombasis"), arg("targeteps"), arg("maxitr"))
......
......@@ -286,7 +286,7 @@ namespace xerus {
bool new_external = std::find(oldNodes.begin(), oldNodes.end(), l.other) != oldNodes.end();
if (!l.external && !new_external) { l.other = idMap[l.other]; }
if (!l.external && new_external) {
l.other = -1;
l.other = 0;
l.indexPosition = external_index_pos;
external_index_pos++;
l.external = true;
......
......@@ -616,7 +616,7 @@ namespace xerus {
}
}
while (exceeds_maximal_ranks()) {
while (exceeds_maximal_ranks() && !_keepRank) {
// Move left from given CorePosition
for (size_t n = _position; n > 0; --n) {
transfer_core(n+1, n, !_keepRank);
......@@ -1147,7 +1147,6 @@ namespace xerus {
}
}
}
// Use Tensor fallback
if (_other.tensorObjectReadOnly->nodes.size() > 1) {
LOG_ONCE(warning, "Assigning a general tensor network to TTOperator not yet implemented. casting to fullTensor first");
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment