Commit 38bbb520 authored by Philipp Trunschke

add weighted uqRaAdf, its testcases and TODOs

parent bce73718
Pipeline #1046 passed with stages
in 7 minutes and 58 seconds
......@@ -39,5 +39,7 @@ namespace xerus { namespace uq {
/// @brief Rank-adaptive ADF reconstruction of a TT tensor from per-sample measurement positions and solutions.
/// @param _positions  one vector of basis-evaluation tensors per sample (outer index: sample).
/// @param _solutions  measured solution tensor per sample; must match _positions in length.
/// @param _dimensions dimensions of the TT tensor to reconstruct.
/// @param _targetEps  target residual at which the iteration stops.
/// @param _maxItr     maximum number of iterations; 0 presumably means "no limit" — TODO confirm against the implementation.
TTTensor uq_ra_adf(const std::vector<std::vector<Tensor>>& _positions, const std::vector<Tensor>& _solutions, const std::vector<size_t>& _dimensions, const double _targetEps = 1e-8, const size_t _maxItr = 0);
/// @brief Weighted variant of uq_ra_adf: each sample j contributes with factor _weights[j] to the residual.
/// @param _weights    one weight per sample; must match _solutions in length.
TTTensor uq_ra_adf(const std::vector<std::vector<Tensor>>& _positions, const std::vector<Tensor>& _solutions, const std::vector<double>& _weights, const std::vector<size_t>& _dimensions, const double _targetEps = 1e-8, const size_t _maxItr = 0);
/// @brief Rank-adaptive ADF with an initial value: refines the given TT tensor _x in place using the measurement set.
/// @param _x          initial guess; updated by the solver.
/// @param _measurments measurement set (parameter vectors and solutions).
/// @param _basisType  polynomial basis used to evaluate the parameter vectors.
TTTensor uq_ra_adf_iv(TTTensor& _x, const UQMeasurementSet& _measurments, const PolynomBasis _basisType, const double _targetEps = 1e-8, const size_t _maxItr = 0);
}}
import unittest
import os, unittest
import xerus as xe
import numpy as np
from numpy.polynomial.legendre import legval
......@@ -16,6 +16,46 @@ class TestReconstruction(unittest.TestCase):
deg = 2
basis = xe.PolynomBasis.Legendre
x = np.linspace(0, 1, x_dim)
def discretized_fnc(y):
return fnc(x, y)
path = os.path.join(os.path.dirname(__file__), "cm_samples.npz")
cm_samples = np.load(path)
nodes = cm_samples["samples"][:n_samples]
values = [xe.Tensor.from_ndarray(discretized_fnc(y)) for y in nodes]
vector = lambda x: xe.Tensor.from_ndarray(legval(x, np.eye(deg+1)))
measurements = [[vector(ni) for ni in node] for node in nodes]
weights = cm_samples["weights"][:n_samples]
dimension = [x_dim] + [deg+1]*y_dim
reco = xe.uq_ra_adf(measurements, values, weights, dimension, targeteps=1e-8, maxitr=70)
#TODO: implement a xerus function: tt_evaluate(tt, pos, pos2meas) where pos2meas is a function pos2meas(int mode, int idx, pos) that calculates the idx-th basis function in the given mode
#TODO: implement a xerus function: measurements(pos_vector, pos2meas)
test_nodes = 2*np.random.rand(n_test_samples, y_dim)-1
error = 0
for y in test_nodes:
res = xe.uq_tt_evaluate(reco, y, basis).to_ndarray()
ref = discretized_fnc(y)
error += np.linalg.norm(res - ref)**2 / np.linalg.norm(ref)**2
error = np.sqrt(error) / n_test_samples
self.assertLessEqual(error, 1e-3)
def test_small_reconstruction_explicit(self):
# the function to approximate
def fnc(x, y):
return np.sin(2*np.pi*x)*(y[0] + 0.1*y[1]**2) + np.cos(2*np.pi*x)*y[1]
x_dim = 100
y_dim = 2
n_samples = 10000
n_test_samples = 100
deg = 2
basis = xe.PolynomBasis.Legendre
x = np.linspace(0, 1, x_dim)
def discretized_fnc(y):
return fnc(x, y)
......
......@@ -55,6 +55,7 @@ namespace xerus { namespace uq { namespace impl_uqRaAdf {
const std::vector<std::vector<Tensor>> positions;
const std::vector<Tensor>& solutions;
const std::vector<double> weights;
TTTensor& outX;
......@@ -162,6 +163,7 @@ namespace xerus { namespace uq { namespace impl_uqRaAdf {
maxIterations(_maxItr),
positions(create_positions(_x, _basisType, _measurments.parameterVectors)),
solutions(_measurments.solutions),
weights(std::vector<double>(N, 1.0)),
outX(_x),
x(_x, 0, P),
rightStack(d, std::vector<Tensor>(N)),
......@@ -183,6 +185,28 @@ namespace xerus { namespace uq { namespace impl_uqRaAdf {
maxIterations(_maxItr),
positions(transpose_positions(_x, _positions, _solutions)),
solutions(_solutions),
weights(std::vector<double>(N, 1.0)),
outX(_x),
x(_x, 0, P),
rightStack(d, std::vector<Tensor>(N)),
leftIsStack(d, std::vector<Tensor>(N)),
leftOughtStack(d, std::vector<Tensor>(N)),
rankEps(_initalRankEps),
prevRanks(tracking+1, _x.ranks())
{
LOG(uqADF, "Set size: " << N);
shuffle_sets();
}
InternalSolver(TTTensor& _x, const std::vector<std::vector<Tensor>>& _positions, const std::vector<Tensor>& _solutions, const std::vector<double>& _weights, const size_t _maxItr, const double _targetEps, const double _initalRankEps) :
N(_solutions.size()),
d(_x.degree()),
targetResidual(_targetEps),
maxIterations(_maxItr),
positions(transpose_positions(_x, _positions, _solutions)),
solutions(_solutions),
weights(_weights),
outX(_x),
x(_x, 0, P),
rightStack(d, std::vector<Tensor>(N)),
......@@ -293,7 +317,7 @@ namespace xerus { namespace uq { namespace impl_uqRaAdf {
// Combine with ought part
contract(dyadComp, isPart - leftOughtStack[_corePosition-1][j], dyadicPart, 0);
delta += dyadComp;
delta += weights[j] * dyadComp;
}
} else { // _corePosition == 0
Tensor shuffledX = x.get_core(_setId);
......@@ -308,7 +332,7 @@ namespace xerus { namespace uq { namespace impl_uqRaAdf {
contract(dyadComp, dyadComp - solutions[j], rightStack[_corePosition+1][j], 0);
dyadComp.reinterpret_dimensions({1, dyadComp.dimensions[0], dyadComp.dimensions[1]});
delta += dyadComp;
delta += weights[j] * dyadComp;
}
}
......@@ -326,7 +350,7 @@ namespace xerus { namespace uq { namespace impl_uqRaAdf {
const size_t j = sets[_setId][jIdx];
contract(tmp, _delta, rightStack[1][j], 1);
const double normPart = misc::sqr(frob_norm(tmp));
norm += normPart;
norm += weights[j] * normPart;
}
} else { // _corePosition > 0
Tensor shuffledDelta = reshuffle(_delta, {1, 0, 2});
......@@ -356,7 +380,7 @@ namespace xerus { namespace uq { namespace impl_uqRaAdf {
}
REQUIRE(tmp.size == 1, "IE");
norm += tmp[0];
norm += weights[j] * tmp[0];
}
}
......@@ -584,6 +608,29 @@ namespace xerus { namespace uq { namespace impl_uqRaAdf {
return x;
}
/**
 * @brief Weighted rank-adaptive ADF reconstruction of a TT tensor.
 *
 * Builds a start tensor whose first component carries the sample mean of the
 * solutions (all other components are rank-1 dirac tensors), then runs the
 * internal two-set solver with per-sample weights.
 *
 * @param _positions  basis-evaluation tensors per sample (outer index: sample).
 * @param _solutions  measured solution tensor per sample.
 * @param _weights    one weight per sample; applied multiplicatively to each
 *                    sample's contribution inside the solver.
 * @param _dimensions dimensions of the TT tensor to reconstruct; front() must
 *                    equal the size of each solution tensor.
 * @param _targetEps  target residual at which the iteration stops.
 * @param _maxItr     maximum number of solver iterations.
 * @return the reconstructed TT tensor.
 */
TTTensor uq_ra_adf(const std::vector<std::vector<Tensor>>& _positions, const std::vector<Tensor>& _solutions, const std::vector<double>& _weights, const std::vector<size_t>& _dimensions, const double _targetEps, const size_t _maxItr) {
    REQUIRE(_positions.size() == _solutions.size(), "Invalid measurments");
    // The solver indexes weights[j] in lock-step with solutions[j]; a short
    // weight vector would be an out-of-bounds read, so validate it here.
    REQUIRE(_weights.size() == _solutions.size(), "Invalid weights");
    // front() below requires at least one solution.
    REQUIRE(!_solutions.empty(), "Invalid measurments");
    REQUIRE(_dimensions.front() == _solutions.front().size, "Inconsitent spacial dimension");

    LOG(UQ, "Calculating Average as start.");
    TTTensor x(_dimensions);

    Tensor mean = sample_mean(_solutions);

    // Set mean: the spatial (first) component carries the average solution,
    // all parameter components are rank-1 diracs selecting the 0th basis function.
    mean.reinterpret_dimensions({1, x.dimensions[0], 1});
    x.set_component(0, mean);
    for(size_t k = 1; k < x.degree(); ++k) {
        x.set_component(k, Tensor::dirac({1, x.dimensions[k], 1}, 0));
    }
    x.assume_core_position(0);

    impl_uqRaAdf::InternalSolver<2> solver(x, _positions, _solutions, _weights, _maxItr, _targetEps, 1e-1);
    solver.solve();
    return x;
}
TTTensor uq_ra_adf_iv(TTTensor& _x, const UQMeasurementSet& _measurments, const PolynomBasis _basisType, const double _targetEps, const size_t _maxItr) {
REQUIRE(_measurments.parameterVectors.size() == _measurments.solutions.size(), "Invalid measurments");
REQUIRE(_x.dimensions.front() == _measurments.solutions.front().size, "Inconsitent spacial dimension");
......
......@@ -188,6 +188,11 @@ void expose_recoveryAlgorithms() {
}, (arg("positions"), arg("solutions"), arg("dimensions"), arg("targeteps"), arg("maxitr"))
);
// Python binding for the weighted overload of uq::uq_ra_adf.
// Overloads by argument list: this variant additionally takes one weight per
// sample ("weights") between "solutions" and "dimensions". Note: unlike the
// C++ declaration, the binding exposes no default values, so "targeteps" and
// "maxitr" must be passed explicitly from Python.
def("uq_ra_adf", +[](const std::vector<std::vector<Tensor>>& _positions, const std::vector<Tensor>& _solutions, const std::vector<double>& _weights, const std::vector<size_t>& _dimensions, const double _targetEps, const size_t _maxItr){
return uq::uq_ra_adf(_positions, _solutions, _weights, _dimensions, _targetEps, _maxItr);
}, (arg("positions"), arg("solutions"), arg("weights"), arg("dimensions"), arg("targeteps"), arg("maxitr"))
);
def("uq_ra_adf_iv", +[](TTTensor& _x, const uq::UQMeasurementSet& _measurements, const uq::PolynomBasis _basisType, const double _targetEps, const size_t _maxItr){
return uq::uq_ra_adf_iv(_x, _measurements, _basisType, _targetEps, _maxItr);
}, (arg("initial guess"), arg("measurements"), arg("polynombasis"), arg("targeteps"), arg("maxitr"))
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment