Commit 28c4111e authored by Ben Huber

TTNetwork::use_dense_representations and exporting dense_copy and sparse_copy to python; signal cascade example in documentation
parent 426a48e2
@@ -2,6 +2,11 @@
 Potentially breaking changes are marked with an exclamation point '!' at the begin of their description.
+* 2017-05-31 v3.0.1
+	* Added TTNetwork::use_dense_representations() to convert all components to dense representation.
+	* Exporting Tensor::dense_copy() and Tensor::sparse_copy() to python.
+	* More examples on the homepage and checks for code-coverage.
 * 2017-05-29 v3.0.0
 	* Python wrapper now stable.
 	* ! REQUIRE macro now logs as error instead of fatal error.
......
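The new v3.0.1 calls in one place — a minimal Python sketch (the dimensions and ranks below are arbitrary placeholders, not part of the commit):

import xerus as xe

t = xe.TTTensor.random([4, 4, 4], [2, 2])
t.use_dense_representations()     # convert every TT component to dense storage
c = t.get_component(0)
dense = c.dense_copy()            # dense copy; c itself is left unchanged
sparse = c.sparse_copy()          # sparse copy; c itself is left unchanged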
#include <xerus.h>
#include <iostream>
#include <string>
#include <fstream>

using namespace xerus;

const size_t MAX_NUM_PER_SITE = 64;

Tensor create_M() {
	Tensor M = -1*Tensor::identity({MAX_NUM_PER_SITE, MAX_NUM_PER_SITE});
	for (size_t i = 0; i < MAX_NUM_PER_SITE-1; ++i) {
		M[{i+1, i}] = 1.0;
	}
	return M;
}

Tensor create_L() {
	Tensor L({MAX_NUM_PER_SITE, MAX_NUM_PER_SITE});
	L.modify_diagonal_entries([](value_t& _x, const size_t _i) {
		_x = double(_i)/double(_i+5);
	});
	return L;
}

Tensor create_S() {
	Tensor S({MAX_NUM_PER_SITE, MAX_NUM_PER_SITE});
	// Set diagonal
	for (size_t i = 0; i < MAX_NUM_PER_SITE; ++i) {
		S[{i, i}] = -double(i);
	}
	// Set offdiagonal
	for (size_t i = 0; i < MAX_NUM_PER_SITE-1; ++i) {
		S[{i, i+1}] = double(i+1);
	}
	return 0.07*S;
}

TTOperator create_operator(const size_t _degree) {
	const Index i, j, k, l;

	// Create matrices
	const Tensor M = create_M();
	const Tensor S = create_S();
	const Tensor L = create_L();
	const Tensor Sstar = 0.7*M+S;
	const Tensor I = Tensor::identity({MAX_NUM_PER_SITE, MAX_NUM_PER_SITE});

	// Create empty TTOperator
	TTOperator A(2*_degree);
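	// Each component comp(i, j, k, l) is indexed as (left TT-rank, output mode,
	// input mode, right TT-rank); the dirac tensors select the (left, right)
	// block position of each matrix within the 3x3 transfer structure.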
	Tensor comp;

	// Create first component
	comp(i, j, k, l) =
		Sstar(j, k)*Tensor::dirac({1, 3}, 0)(i, l)
		+ L(j, k)*Tensor::dirac({1, 3}, 1)(i, l)
		+ I(j, k)*Tensor::dirac({1, 3}, 2)(i, l);
	A.set_component(0, comp);

	// Create middle components
	comp(i, j, k, l) =
		I(j, k) * Tensor::dirac({3, 3}, {0, 0})(i, l)
		+ M(j, k) * Tensor::dirac({3, 3}, {1, 0})(i, l)
		+ S(j, k) * Tensor::dirac({3, 3}, {2, 0})(i, l)
		+ L(j, k) * Tensor::dirac({3, 3}, {2, 1})(i, l)
		+ I(j, k) * Tensor::dirac({3, 3}, {2, 2})(i, l);
	for(size_t c = 1; c+1 < _degree; ++c) {
		A.set_component(c, comp);
	}

	// Create last component
	comp(i, j, k, l) =
		I(j, k)*Tensor::dirac({3, 1}, 0)(i, l)
		+ M(j, k)*Tensor::dirac({3, 1}, 1)(i, l)
		+ S(j, k)*Tensor::dirac({3, 1}, 2)(i, l);
	A.set_component(_degree-1, comp);

	return A;
}

/// @brief calculates the one-norm of a TTTensor, assuming that all entries in it are positive
double one_norm(const TTTensor &_x) {
	Index j;
	return double(_x(j&0) * TTTensor::ones(_x.dimensions)(j&0));
}

std::vector<TTTensor> implicit_euler(const TTOperator& _A, TTTensor _x,
                                     const double _stepSize, const size_t _n)
{
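	// One implicit Euler step solves (I - stepSize*A) x_new = x_old for x_new;
	// the ALS call below acts as the linear solver in TT format.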
	const TTOperator op = TTOperator::identity(_A.dimensions)-_stepSize*_A;

	Index j,k;
	auto ourALS = ALS_SPD;
	ourALS.convergenceEpsilon = 0;
	ourALS.numHalfSweeps = 2;

	std::vector<TTTensor> results;
	TTTensor nextX = _x;
	results.push_back(_x);

	for(size_t i = 0; i < _n; ++i) {
		ourALS(op, nextX, _x);

		// Normalize
		double norm = one_norm(nextX);
		nextX /= norm;

		XERUS_LOG(iter, "Done itr " << i
			<< " residual: " << frob_norm(op(j/2,k/2)*nextX(k&0) - _x(j&0))
			<< " norm: " << norm);

		_x = nextX;
		results.push_back(_x);
	}

	return results;
}

double get_mean_concentration(const TTTensor& _res, const size_t _i) {
	const Index k,l;
	TensorNetwork result(_res);
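	// Sum out every mode with a ones vector, except mode _i, which is contracted
	// with (0, 1, ..., MAX_NUM_PER_SITE-1) to obtain the expected copy number there.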
	const Tensor weights({MAX_NUM_PER_SITE}, [](const size_t _k){
		return double(_k);
	});
	const Tensor ones = Tensor::ones({MAX_NUM_PER_SITE});

	for (size_t j = 0; j < _res.degree(); ++j) {
		if (j == _i) {
			result(l&0) = result(k, l&1) * weights(k);
		} else {
			result(l&0) = result(k, l&1) * ones(k);
		}
	}
	// at this point the degree of 'result' is 0, so there is only one entry
	return result[{}];
}

void print_mean_concentrations_to_file(const std::vector<TTTensor> &_result) {
	std::fstream out("mean.dat", std::fstream::out);
	for (const auto& res : _result) {
		for (size_t k = 0; k < res.degree(); ++k) {
			out << get_mean_concentration(res, k) << ' ';
		}
		out << std::endl;
	}
}

int main() {
	const size_t numProteins = 10;
	const size_t numSteps = 300;
	const double stepSize = 1.0;
	const size_t rankX = 3;

	auto start = TTTensor::dirac(
		std::vector<size_t>(numProteins, MAX_NUM_PER_SITE),
		0
	);
	start.use_dense_representations();
	start += 1e-14 * TTTensor::random(
		start.dimensions,
		std::vector<size_t>(start.degree()-1, rankX-1)
	);

	const auto A = create_operator(numProteins);
	const auto results = implicit_euler(A, start, stepSize, numSteps);

	print_mean_concentrations_to_file(results);
}
#!/usr/bin/gnuplot
set term pngcairo size 600, 400
unset key
set grid
set output "cascade.png"
set xlabel "timestep"
set ylabel "mean concentration"
plot 'mean.dat' u 1 w l, \
'mean.dat' u 2 w l, \
'mean.dat' u 3 w l, \
'mean.dat' u 4 w l, \
'mean.dat' u 5 w l, \
'mean.dat' u 6 w l, \
'mean.dat' u 7 w l, \
'mean.dat' u 8 w l, \
'mean.dat' u 9 w l, \
'mean.dat' u 10 w l
import xerus as xe

MAX_NUM_PER_SITE = 64

def create_M():
	M = -1 * xe.Tensor.identity([MAX_NUM_PER_SITE, MAX_NUM_PER_SITE])
	for i in xrange(MAX_NUM_PER_SITE-1) :
		M[[i+1, i]] = 1.0
	return M

def create_L():
	L = xe.Tensor([MAX_NUM_PER_SITE, MAX_NUM_PER_SITE])
	for i in xrange(MAX_NUM_PER_SITE) :
		L[[i,i]] = i / (i+5.0)
	return L

def create_S():
	S = xe.Tensor([MAX_NUM_PER_SITE, MAX_NUM_PER_SITE])
	# set diagonal
	for i in xrange(MAX_NUM_PER_SITE) :
		S[[i,i]] = -i
	# set offdiagonal
	for i in xrange(MAX_NUM_PER_SITE-1) :
		S[[i,i+1]] = i+1
	return 0.07*S

def create_operator(degree):
	i,j,k,l = xe.indices(4)

	# create matrices
	M = create_M()
	S = create_S()
	L = create_L()
	Sstar = 0.7*M + S
	I = xe.Tensor.identity([MAX_NUM_PER_SITE, MAX_NUM_PER_SITE])

	# create empty TTOperator
	A = xe.TTOperator(2*degree)

	# create first component
	comp = xe.Tensor()
	comp(i, j, k, l) << \
		Sstar(j, k) * xe.Tensor.dirac([1, 3], 0)(i, l) \
		+ L(j, k) * xe.Tensor.dirac([1, 3], 1)(i, l) \
		+ I(j, k) * xe.Tensor.dirac([1, 3], 2)(i, l)
	A.set_component(0, comp)

	# create middle components
	comp(i, j, k, l) << \
		I(j, k) * xe.Tensor.dirac([3, 3], [0, 0])(i, l) \
		+ M(j, k) * xe.Tensor.dirac([3, 3], [1, 0])(i, l) \
		+ S(j, k) * xe.Tensor.dirac([3, 3], [2, 0])(i, l) \
		+ L(j, k) * xe.Tensor.dirac([3, 3], [2, 1])(i, l) \
		+ I(j, k) * xe.Tensor.dirac([3, 3], [2, 2])(i, l)
	for c in xrange(1, degree-1) :
		A.set_component(c, comp)

	# create last component
	comp(i, j, k, l) << \
		I(j, k) * xe.Tensor.dirac([3, 1], 0)(i, l) \
		+ M(j, k) * xe.Tensor.dirac([3, 1], 1)(i, l) \
		+ S(j, k) * xe.Tensor.dirac([3, 1], 2)(i, l)
	A.set_component(degree-1, comp)

	return A

def one_norm(x):
	i = xe.Index()
	return float(x(i&0) * xe.TTTensor.ones(x.dimensions)(i&0))

def implicit_euler(A, x, stepSize, n):
	op = xe.TTOperator.identity(A.dimensions) - stepSize*A
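	# one implicit Euler step solves (I - stepSize*A) x_new = x_old via the ALS below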
	j,k = xe.indices(2)
	ourALS = xe.ALS_SPD
	ourALS.convergenceEpsilon = 1e-4
	ourALS.numHalfSweeps = 100

	results = [x]
	nextX = xe.TTTensor(x)
	for i in xrange(n) :
		ourALS(op, nextX, x)

		# normalize
		norm = one_norm(nextX)
		nextX /= norm

		print("done itr", i, \
			"residual:", xe.frob_norm(op(j/2,k/2)*nextX(k&0) - x(j&0)), \
			"one-norm:", norm)

		x = xe.TTTensor(nextX) # ensure it is a copy
		results.append(x)

	return results

def get_mean_concentration(x, i):
	k,l = xe.indices(2)
	result = xe.TensorNetwork(x)
	weights = xe.Tensor.from_function([MAX_NUM_PER_SITE], lambda idx: idx[0])
	ones = xe.Tensor.ones([MAX_NUM_PER_SITE])
	for j in xrange(x.degree()) :
		if j == i :
			result(l&0) << result(k, l&1) * weights(k)
		else :
			result(l&0) << result(k, l&1) * ones(k)
	# at this point the degree of 'result' is 0, so there is only one entry
	return result[[]]

def print_mean_concentrations_to_file(results):
	f = open("mean.dat", 'w')
	for res in results :
		for k in xrange(res.degree()) :
			f.write(str(get_mean_concentration(res, k))+' ')
		f.write('\n')
	f.close()

numProteins = 10
numSteps = 200
stepSize = 1.0
rankX = 3

A = create_operator(numProteins)

start = xe.TTTensor.dirac([MAX_NUM_PER_SITE]*numProteins, 0)
for i in xrange(numProteins) :
	start.set_component(i, start.get_component(i).dense_copy())
start += 1e-14 * xe.TTTensor.random(start.dimensions, [rankX-1]*(start.degree()-1))

results = implicit_euler(A, start, stepSize, numSteps)

print_mean_concentrations_to_file(results)
......
 ---
 layout: post
 title: "The ALS Algorithm"
-date: 1000-12-10
+date: 1000-11-10
 topic: "Examples"
 section: "Examples"
 ---
......
@@ -46,6 +46,7 @@ input:checked + label {
 	position: relative;
 	width: 100%;
 	z-index: 5;
+	margin-bottom: .25em;
 }
 #content section {
@@ -61,6 +62,7 @@
 .highlighter-rouge pre {
 	min-width: 662px;
+	tab-size: 4;
 }
 #content #content1 {
......
@@ -295,6 +295,11 @@ namespace xerus {
 		virtual void resize_mode(const size_t _mode, const size_t _newDim, const size_t _cutPos=~0ul) override;
+		/**
+		 * @brief Converts all components to use dense representations.
+		 * @note This might be required because not all functionality of TTNetworks is available with sparse component tensors.
+		 */
+		void use_dense_representations();
 		/**
 		 * @brief Complete access to a specific component of the TT decomposition.
......
@@ -208,6 +208,8 @@ void expose_tensor() {
 		.def("use_sparse_representation", &Tensor::use_sparse_representation,
 			(arg("epsilon")=EPSILON)
 		)
+		.def("sparse_copy", &Tensor::sparse_copy)
+		.def("dense_copy", &Tensor::dense_copy)
 		.def("ensure_own_data", &Tensor::ensure_own_data)
 		.def("ensure_own_data_no_copy", &Tensor::ensure_own_data_no_copy)
 		.def("apply_factor", &Tensor::apply_factor)
......
@@ -51,6 +51,7 @@ void expose_ttnetwork() {
 		.def("dirac", static_cast<TTTensor (*)(Tensor::DimensionTuple, const Tensor::MultiIndex&)>(&TTTensor::dirac))
 		.def("dirac", static_cast<TTTensor (*)(Tensor::DimensionTuple, const size_t)>(&TTTensor::dirac)).staticmethod("dirac")
+		.def("use_dense_representations", &TTTensor::use_dense_representations)
 		.def("reduce_to_maximal_ranks", &TTTensor::reduce_to_maximal_ranks).staticmethod("reduce_to_maximal_ranks")
 //		.def("degrees_of_freedom", static_cast<size_t (TTTensor::*)()>(&TTTensor::degrees_of_freedom))
 		.def("degrees_of_freedom", static_cast<size_t (*)(const std::vector<size_t>&, const std::vector<size_t>&)>(&TTTensor::degrees_of_freedom)).staticmethod("degrees_of_freedom")
@@ -115,6 +116,7 @@
 		.def("dirac", static_cast<TTOperator (*)(Tensor::DimensionTuple, const Tensor::MultiIndex&)>(&TTOperator::dirac))
 		.def("dirac", static_cast<TTOperator (*)(Tensor::DimensionTuple, const size_t)>(&TTOperator::dirac)).staticmethod("dirac")
+		.def("use_dense_representations", &TTOperator::use_dense_representations)
 		.def("reduce_to_maximal_ranks", &TTOperator::reduce_to_maximal_ranks).staticmethod("reduce_to_maximal_ranks")
 //		.def("degrees_of_freedom", static_cast<size_t (TTOperator::*)()>(&TTOperator::degrees_of_freedom))
 		.def("degrees_of_freedom", static_cast<size_t (*)(const std::vector<size_t>&, const std::vector<size_t>&)>(&TTOperator::degrees_of_freedom)).staticmethod("degrees_of_freedom")
......
@@ -446,6 +446,13 @@ namespace xerus {
 	}
+	template<bool isOperator>
+	void TTNetwork<isOperator>::use_dense_representations() {
+		for (size_t i = 0; i < degree()/N; ++i) {
+			component(i).use_dense_representation();
+		}
+	}
 	template<bool isOperator>
 	Tensor& TTNetwork<isOperator>::component(const size_t _idx) {
 		REQUIRE(_idx == 0 || _idx < degree()/N, "Illegal index " << _idx <<" in TTNetwork::component, as there are onyl " << degree()/N << " components.");
......