Commit d82eb788 authored by Sebastian Wolf

Change Tensor from-data constructor for GCC 7

parent bb2379b9
Pipeline #765 passed with stages
in 6 minutes and 49 seconds
@@ -28,44 +28,74 @@
namespace xerus {
	TTTensor randomTTSVD(const Tensor& _x, const std::vector<size_t>& _ranks) {
		std::random_device rd;
		std::mt19937 rnd(rd());
		std::normal_distribution<double> dist(0, 1);
		
		const size_t d = _x.degree();
		TTTensor u(d);
		Tensor b = _x;
		
		for(size_t j = d; j >= 2; --j) {
			// Sparse random tensor g on the first j-1 modes, populated only at
			// positions where b has non-zero entries.
			std::vector<size_t> gDims(b.dimensions.cbegin(), b.dimensions.cbegin()+(j-1));
			Tensor g(gDims, Tensor::Representation::Sparse);
			const auto& data = b.get_unsanitized_sparse_data();
			for(const auto& entry : data) {
				auto pos = Tensor::position_to_multiIndex(entry.first, b.dimensions);
				pos.pop_back();
				g[pos] = dist(rnd);
			}
			
			// Random projection of b followed by an RQ decomposition; Q is stored as component j-1.
			Tensor a;
			contract(a, g, false, b, false, j-1);
			Tensor R, Q;
			calculate_rq(R, Q, a, 1);
			u.set_component(j-1, Q);
			
			if(j == d) {
				contract(b, b, false, Q, true, 1);
			} else {
				contract(b, b, false, Q, true, 2);
			}
		}
		u.set_component(0, b);
		
		return u;
	}
//	TTTensor randomTTSVD(const Tensor& _x, const std::vector<size_t>& _ranks, const std::vector<size_t>& _oversampling) {
//		std::normal_distribution<double> dist(0, 1);
//		
//		const size_t d = _x.degree();
//		TTTensor u(d);
//		Tensor b = _x;
//		
//		for(size_t j = d; j >= 2; --j) {
//			const size_t s = _ranks[j-2] + _oversampling[j-2];
//			
//			const std::vector<size_t> mixDims(b.dimensions.cbegin(), b.dimensions.cbegin()+(j-1));
//			
//			std::vector<size_t> outDims({s});
//			outDims.insert(outDims.end(), b.dimensions.cbegin()+(j-1), b.dimensions.cend());
//			
//			Tensor a(outDims, Tensor::Representation::Sparse, Tensor::Initialisation::Zero);
//			
//			if(b.is_sparse()) {
//				const size_t staySize = misc::product(b.dimensions, j-1, b.dimensions.size());
//				
//				std::map<size_t, std::vector<value_t>> usedG;
//				
//				const auto& data = b.get_sparse_data();
//				for(const auto& entry : data) {
//					const size_t pos = entry.first/staySize;
//					const size_t outPos = entry.first%staySize;
//					
//					auto& gEntry = usedG[pos];
//					if(gEntry.empty()) {
//						gEntry.reserve(s);
//						for(size_t k = 0; k < s; ++k) {
//							gEntry.push_back(dist(xerus::misc::randomEngine));
//						}
//					}
//					
//					for(size_t k = 0; k < s; ++k) {
//						a[outPos+k*staySize] += gEntry[k]*entry.second;
//					}
//				}
//				
//			} else {
//				std::vector<size_t> gDims({s});
//				gDims.insert(gDims.end(), mixDims.cbegin(), mixDims.cend());
//				const Tensor g = Tensor::random(gDims, dist, xerus::misc::randomEngine);
//				contract(a, g, false, b, false, j-1);
//			}
//			
//			
//			Tensor R, Q;
//			calculate_rq(R, Q, a, 1);
//			
//			
//			if(j == d) {
//				contract(b, b, false, Q, true, 1);
//				Q.reinterpret_dimensions(Q.dimensions | std::vector<size_t>({1}));
//				u.set_component(j-1, Q);
//			} else {
//				contract(b, b, false, Q, true, 2);
//				u.set_component(j-1, Q);
//			}
//		}
//		
//		b.reinterpret_dimensions(std::vector<size_t>({1}) | b.dimensions);
//		u.set_component(0, b);
//		
//		u.round(_ranks);
//		
//		return u;
}
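
Note (not part of the commit): a minimal, hypothetical call site for the sparse randomTTSVD above. The dimensions, entries, and rank values are made up for illustration; it assumes randomTTSVD is declared in the public xerus headers, and the committed draft does not yet make use of _ranks.

#include <xerus.h>
#include <vector>

int main() {
	using namespace xerus;
	
	// A small sparse order-3 tensor with a few hand-set entries.
	Tensor x({8, 8, 8}, Tensor::Representation::Sparse, Tensor::Initialisation::Zero);
	x[{0, 1, 2}] = 1.0;
	x[{3, 4, 5}] = -2.0;
	x[{7, 0, 3}] = 0.5;
	
	// Illustrative target ranks for the two internal bonds.
	const std::vector<size_t> ranks({2, 2});
	
	const TTTensor tt = randomTTSVD(x, ranks);
	return 0;
}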
@@ -159,7 +159,7 @@ namespace xerus {
		 * @param _dimensions the dimensions of the new tensor.
		 * @param _data initial dense data in row-major order.
		 */
-		explicit Tensor(DimensionTuple _dimensions, std::unique_ptr<value_t[]>&& _data);
+		explicit Tensor(DimensionTuple _dimensions, std::unique_ptr<value_t>&& _data);
		
		/**
@@ -65,7 +65,7 @@ namespace xerus {
	}
	
-	Tensor::Tensor(DimensionTuple _dimensions, std::unique_ptr<value_t[]>&& _data)
+	Tensor::Tensor(DimensionTuple _dimensions, std::unique_ptr<value_t>&& _data)
		: dimensions(std::move(_dimensions)), size(misc::product(dimensions)), representation(Representation::Dense), denseData(std::move(_data)) {
		REQUIRE(size != 0, "May not create tensors with a dimension == 0.");
	}
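
Note (not part of the commit): a minimal sketch of calling the changed from-data constructor. A single-element tensor is used so that the non-array std::unique_ptr<value_t> owns exactly the one value it is responsible for deleting; how larger new[]-allocated buffers are intended to be handed over after this change is not visible in this hunk.

#include <xerus.h>
#include <memory>

int main() {
	// One value_t, owned by a plain unique_ptr, matching the new
	// signature Tensor(DimensionTuple, std::unique_ptr<value_t>&&).
	std::unique_ptr<xerus::value_t> data(new xerus::value_t(3.14));
	
	// Degree-1 tensor of dimension 1 that takes ownership of the value.
	xerus::Tensor t({1}, std::move(data));
	return 0;
}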