Commit fab49ac1 authored by Sebastian Wolf's avatar Sebastian Wolf

Work on ASD and perfDate+measSets

parent fecee9e9
Pipeline #893 failed with stages
in 8 minutes and 52 seconds
......@@ -33,9 +33,18 @@ namespace xerus {
class ASDVariant : public OptimizationAlgorithm {
public:
/// Fully defining constructor. Alternatively, ASDVariants can be created by copying a predefined variant and modifying it.
ASDVariant(const size_t _maxIteration, const double _targetResidual, const double _minimalResidualDecrease)
: OptimizationAlgorithm(_maxIteration, _targetResidual, _minimalResidualDecrease) { }
double minRankEps = 1e-8;
double epsDecay = 0.8;
double controlSetFraction = 0.1;
double initialRankEps = 0.01;
/// Basic constructor
ASDVariant(const size_t _maxIterations, const double _targetRelativeResidual, const double _minimalResidualNormDecrease)
: OptimizationAlgorithm(0, _maxIterations, _targetRelativeResidual, _minimalResidualNormDecrease) { }
/**
* @brief Tries to reconstruct the (low rank) tensor _x from the given measurements.
......
......@@ -33,16 +33,23 @@ namespace xerus {
*/
class OptimizationAlgorithm {
public:
///@brief Maximal allowed number of iterations
const size_t maxIterations;
///@brief Minimal number of iterations.
size_t minIterations;
///@brief The target relative residual at which the algorithm shall stop.
const double targetRelativeResidual;
///@brief Maximal allowed number of iterations.
size_t maxIterations;
///@brief The target residual norm at which the algorithm shall stop.
double targetRelativeResidual;
///@brief Minimal relative decrease of the residual norm ( newRes/oldRes ) until either the ranks are increased (if allowed) or the algorithm stops.
const double minimalResidualNormDecrease;
double minimalResidualNormDecrease;
///@brief Number of iterations used to check for stopping criteria (e.g. residual[iteration] <= residual[iteration-tracking]*pow(minimalResidualNormDecrease, tracking))
size_t tracking = 100;
protected:
OptimizationAlgorithm(const size_t _maxIterations, const double _targetRelativeResidual, const double _minimalResidualNormDecrease);
OptimizationAlgorithm(const size_t _minIterations, const size_t _maxIterations, const double _targetRelativeResidual, const double _minimalResidualNormDecrease);
};
} // namespace xerus
......@@ -105,6 +105,9 @@ namespace xerus { namespace internal {
Tensor get_average_core() const;
TTTensor get_average_tt() const;
/**
* @brief Gets the ranks of the TTNetwork.
......
......@@ -36,6 +36,9 @@
namespace xerus {
class Tensor;
class TensorNetwork;
template<bool isOperator> class TTNetwork;
typedef TTNetwork<false> TTTensor;
typedef TTNetwork<true> TTOperator;
/**
* @brief Class used to represent a set of single point measurements.
......@@ -56,6 +59,8 @@ namespace xerus {
static SinglePointMeasurementSet random(const size_t _numMeasurements, const Tensor& _solution);
static SinglePointMeasurementSet random(const size_t _numMeasurements, const TTTensor& _solution);
static SinglePointMeasurementSet random(const size_t _numMeasurements, const TensorNetwork& _solution);
static SinglePointMeasurementSet random(const size_t _numMeasurements, const std::vector<size_t>& _dimensions, std::function<value_t(const std::vector<size_t>&)> _callback);
......@@ -74,6 +79,8 @@ namespace xerus {
void measure(const Tensor& _solution);
// void measure(const TTTensor& _solution); NICE: Minor speedup
void measure(const TensorNetwork& _solution);
void measure(std::function<value_t(const std::vector<size_t>&)> _callback);
......@@ -109,6 +116,8 @@ namespace xerus {
static RankOneMeasurementSet random(const size_t _numMeasurements, const Tensor& _solution);
static RankOneMeasurementSet random(const size_t _numMeasurements, const TTTensor& _solution);
static RankOneMeasurementSet random(const size_t _numMeasurements, const TensorNetwork& _solution);
static RankOneMeasurementSet random(const size_t _numMeasurements, const std::vector<size_t>& _dimensions, std::function<value_t(const std::vector<Tensor>&)> _callback);
......@@ -129,6 +138,8 @@ namespace xerus {
void measure(const Tensor& _solution);
void measure(const TTTensor& _solution);
void measure(const TensorNetwork& _solution);
void measure(std::function<value_t(const std::vector<Tensor>&)> _callback);
......
......@@ -35,7 +35,6 @@
namespace xerus {
template<bool isOperator> class TTNetwork;
typedef TTNetwork<false> TTTensor;
typedef TTNetwork<true> TTOperator;
......@@ -43,15 +42,15 @@ namespace xerus {
class PerformanceData {
public:
struct DataPoint {
size_t iterationCount;
size_t iteration;
size_t elapsedTime;
double residual;
std::vector<double> residuals;
double error;
TensorNetwork::RankTuple ranks;
size_t dofs;
size_t flags;
DataPoint(const size_t _itrCount, const size_t _time, const value_t _residual, const value_t _error, const TensorNetwork::RankTuple _ranks, const size_t _flags)
: iterationCount(_itrCount), elapsedTime(_time), residual(_residual), error(_error), ranks(_ranks), flags(_flags) {}
DataPoint(const size_t _itrCount, const size_t _time, const std::vector<double>& _residual, const value_t _error, const size_t _dofs, const size_t _flags)
: iteration(_itrCount), elapsedTime(_time), residuals(_residual), error(_error), dofs(_dofs), flags(_flags) {}
};
const bool active;
......@@ -68,73 +67,31 @@ namespace xerus {
std::string additionalInformation;
explicit PerformanceData(const bool _printProgress = false, const bool _active = true) :
active(_active), printProgress(_printProgress), startTime(~0ul), stopTime(~0ul) {}
explicit PerformanceData(const bool _printProgress = false, const bool _active = true);
explicit PerformanceData(const ErrorFunction& _errorFunction, const bool _printProgress = false, const bool _active = true) :
active(_active), printProgress(_printProgress), errorFunction(_errorFunction), startTime(~0ul), stopTime(~0ul) {}
explicit PerformanceData(const ErrorFunction& _errorFunction, const bool _printProgress = false, const bool _active = true);
void start() {
using ::xerus::misc::operator<<;
if (active) {
if(printProgress) {
std::stringstream ss(additionalInformation);
while (ss) {
std::string line;
std::getline(ss, line);
XERUS_LOG_SHORT(PerformanceData, line);
}
}
startTime = misc::uTime();
}
}
void start();
void stop_timer() {
if (active) {
stopTime = misc::uTime();
}
}
void stop_timer();
void continue_timer() {
if (active) {
size_t currtime = misc::uTime();
startTime += currtime - stopTime;
stopTime = ~0ul;
}
}
void continue_timer();
void reset() {
if (active) {
data.clear();
additionalInformation.clear();
startTime = ~0ul;
stopTime = ~0ul;
}
}
void reset();
size_t get_elapsed_time() const {
return misc::uTime() - startTime;
}
size_t get_elapsed_time() const;
size_t get_runtime() const {
if (stopTime != ~0ul) {
return stopTime - startTime;
} else {
return misc::uTime() - startTime;
}
}
size_t get_runtime() const;
void add(const size_t _itrCount, const value_t _residual, const TensorNetwork::RankTuple _ranks = TensorNetwork::RankTuple(), const size_t _flags = 0);
void add(const double _residual, const TTTensor& _x, const size_t _flags = 0);
void add(const size_t _itrCount, const value_t _residual, const TTTensor& _x, const size_t _flags = 0);
void add(const std::vector<double>& _residuals, const TTTensor& _x, const size_t _flags = 0);
void add(const value_t _residual, const TensorNetwork::RankTuple _ranks = TensorNetwork::RankTuple(), const size_t _flags = 0);
void add(const size_t _itrCount, const double _residual, const TTTensor& _x, const size_t _flags = 0);
void add(const value_t _residual, const TTTensor& _x, const size_t _flags = 0);
void add(const size_t _itrCount, const std::vector<double>& _residuals, const TTTensor& _x, const size_t _flags);
operator bool() const {
return active;
}
operator bool() const { return active; }
/// @brief The pipe operator allows to add everything that can be converted to string to the additional information in the header.
template<class T>
......
......@@ -293,7 +293,7 @@ namespace xerus {
/**
* @brief calculates the number of degrees of freedom of the manifold of fixed tt-rank that the given TTTensor is part of
*/
size_t degrees_of_freedom();
size_t degrees_of_freedom() const;
virtual void fix_mode(const size_t _mode, const size_t _slatePosition) override;
......
This diff is collapsed.
......@@ -26,5 +26,5 @@
namespace xerus {
OptimizationAlgorithm::OptimizationAlgorithm(const size_t _maxIterations, const double _targetRelativeResidual, const double _minimalResidualNormDecrease) : maxIterations(_maxIterations), targetRelativeResidual(_targetRelativeResidual), minimalResidualNormDecrease(_minimalResidualNormDecrease) {}
///@brief Fully defining constructor: stores the iteration bounds and the residual-based stopping thresholds. Performs no validation of the arguments.
OptimizationAlgorithm::OptimizationAlgorithm(const size_t _minIterations, const size_t _maxIterations, const double _targetRelativeResidual, const double _minimalResidualNormDecrease) : minIterations(_minIterations), maxIterations(_maxIterations), targetRelativeResidual(_targetRelativeResidual), minimalResidualNormDecrease(_minimalResidualNormDecrease) {}
} // namespace xerus
......@@ -115,7 +115,7 @@ namespace xerus {
};
auto updatePerfdata = [&]() {
_perfData.add(currResidual);
_perfData.add(currResidual, _x);
};
updateResidual();
updatePerfdata();
......
......@@ -140,6 +140,21 @@ namespace xerus { namespace internal {
coreCmp(left, ext, right) = (1.0/double(P))*components[corePosition](left, ext, p, right)*Tensor::ones({P})(p);
return coreCmp;
}
///@brief Builds an ordinary TTTensor from this block-TT by copying every component,
/// substituting the averaged core (see get_average_core()) at the core position.
TTTensor BlockTT::get_average_tt() const {
	TTTensor averaged(dimensions);
	for(size_t pos = 0; pos < degree(); ++pos) {
		const bool isCore = (pos == corePosition);
		averaged.set_component(pos, isCore ? get_average_core() : get_component(pos));
	}
	return averaged;
}
value_t BlockTT::frob_norm() const {
......
......@@ -42,7 +42,6 @@ namespace xerus {
SinglePointMeasurementSet SinglePointMeasurementSet::random(const size_t _numMeasurements, const std::vector<size_t>& _dimensions) {
SinglePointMeasurementSet result;
result.create_random_positions(_numMeasurements, _dimensions);
result.measuredValues.resize(_numMeasurements, 0);
return result;
}
......@@ -53,6 +52,13 @@ namespace xerus {
result.measure(_solution);
return result;
}
///@brief Creates a set of _numMeasurements random single-point measurements, evaluated against the TT tensor _solution.
SinglePointMeasurementSet SinglePointMeasurementSet::random(const size_t _numMeasurements, const TTTensor& _solution) {
	SinglePointMeasurementSet measurements;
	measurements.create_random_positions(_numMeasurements, _solution.dimensions);
	measurements.measure(_solution);
	return measurements;
}
SinglePointMeasurementSet SinglePointMeasurementSet::random(const size_t _numMeasurements, const TensorNetwork& _solution) {
SinglePointMeasurementSet result;
......@@ -86,24 +92,6 @@ namespace xerus {
measuredValues.emplace_back(_measuredValue);
}
void SinglePointMeasurementSet::sort(const bool _positionsOnly) {
const auto comperator = [](const std::vector<size_t>& _lhs, const std::vector<size_t>& _rhs) {
REQUIRE(_lhs.size() == _rhs.size(), "Inconsistent degrees in measurment positions.");
for (size_t i = 0; i < _lhs.size(); ++i) {
if (_lhs[i] < _rhs[i]) { return true; }
if (_lhs[i] > _rhs[i]) { return false; }
}
return false; // equality
};
if(_positionsOnly) {
std::sort(positions.begin(), positions.end(), comperator);
} else {
REQUIRE(positions.size() == measuredValues.size(), "Inconsitend SinglePointMeasurementSet encountered.");
misc::simultaneous_sort(positions, measuredValues, comperator);
}
}
value_t SinglePointMeasurementSet::frob_norm() const {
const auto cSize = size();
......@@ -218,9 +206,31 @@ namespace xerus {
}
///@brief Strict weak ordering for index vectors: lexicographic, entrywise comparison.
/// Both vectors are required to have the same length (degree of the measurement).
struct vec_compare {
	bool operator() (const std::vector<size_t>& _lhs, const std::vector<size_t>& _rhs) const {
		REQUIRE(_lhs.size() == _rhs.size(), "Inconsistent degrees in measurment positions.");
		// Entrywise lexicographic comparison; returns false for equal vectors.
		return std::lexicographical_compare(_lhs.begin(), _lhs.end(), _rhs.begin(), _rhs.end());
	}
};
void SinglePointMeasurementSet::sort(const bool _positionsOnly) {
const vec_compare comperator;
if(_positionsOnly) {
std::sort(positions.begin(), positions.end(), comperator);
} else {
REQUIRE(positions.size() == measuredValues.size(), "Inconsitend SinglePointMeasurementSet encountered.");
misc::simultaneous_sort(positions, measuredValues, comperator);
}
}
void SinglePointMeasurementSet::create_random_positions(const size_t _numMeasurements, const std::vector<size_t>& _dimensions) {
using ::xerus::misc::operator<<;
XERUS_REQUIRE(misc::product(_dimensions) >= _numMeasurements, "It's impossible to perform as many measurements as requested. " << _numMeasurements << " > " << _dimensions);
// Create distributions
......@@ -229,22 +239,20 @@ namespace xerus {
indexDist.emplace_back(0, _dimensions[i]-1);
}
std::set<size_t> measuredPositions;
std::set<std::vector<size_t>, vec_compare> measuredPositions;
std::vector<size_t> multIdx(_dimensions.size());
while (positions.size() < _numMeasurements) {
size_t pos = 0;
while (measuredPositions.size() < _numMeasurements) {
for (size_t i = 0; i < _dimensions.size(); ++i) {
multIdx[i] = indexDist[i](misc::randomEngine);
pos *= _dimensions[i]; pos += multIdx[i];
}
if (!misc::contains(measuredPositions, pos)) {
measuredPositions.insert(pos);
positions.push_back(multIdx);
}
measuredPositions.insert(multIdx);
}
sort(true);
measuredValues.resize(_numMeasurements);
for(const auto& pos : measuredPositions) {
positions.push_back(pos);
}
measuredValues.resize(_numMeasurements, 0.0);
}
......@@ -252,6 +260,8 @@ namespace xerus {
// --------------------- RankOneMeasurementSet -----------------
......@@ -273,7 +283,6 @@ namespace xerus {
RankOneMeasurementSet RankOneMeasurementSet::random(const size_t _numMeasurements, const std::vector<size_t>& _dimensions) {
RankOneMeasurementSet result;
result.create_random_positions(_numMeasurements, _dimensions);
result.measuredValues.resize(_numMeasurements, 0);
return result;
}
......@@ -281,27 +290,34 @@ namespace xerus {
RankOneMeasurementSet RankOneMeasurementSet::random(const size_t _numMeasurements, const Tensor& _solution) {
RankOneMeasurementSet result;
result.create_random_positions(_numMeasurements, _solution.dimensions);
result.measure(_solution);
result.measure(_solution );
return result;
}
///@brief Creates a set of _numMeasurements random rank-one measurements, evaluated against the TT tensor _solution.
RankOneMeasurementSet RankOneMeasurementSet::random(const size_t _numMeasurements, const TTTensor& _solution) {
	RankOneMeasurementSet measurements;
	measurements.create_random_positions(_numMeasurements, _solution.dimensions);
	measurements.measure(_solution);
	return measurements;
}
RankOneMeasurementSet RankOneMeasurementSet::random(const size_t _numMeasurements, const TensorNetwork& _solution) {
RankOneMeasurementSet result;
result.create_random_positions(_numMeasurements, _solution.dimensions);
result.measure(_solution);
result.measure(_solution );
return result;
}
RankOneMeasurementSet RankOneMeasurementSet::random(const size_t _numMeasurements, const std::vector<size_t>& _dimensions, std::function<value_t(const std::vector<Tensor>&)> _callback) {
RankOneMeasurementSet result;
result.create_random_positions(_numMeasurements, _dimensions);
result.measure(_callback);
result.measure(_callback );
return result;
}
size_t RankOneMeasurementSet::size() const {
REQUIRE(positions.size() == measuredValues.size(), "Inconsitend SinglePointMeasurementSet encountered.");
REQUIRE(positions.size() == measuredValues.size(), "Inconsitend RankOneMeasurementSet encountered.");
return positions.size();
}
......@@ -374,22 +390,30 @@ namespace xerus {
const auto cSize = size();
for(size_t j = 0; j < cSize; ++j) {
size_t rebuildIndex = 0;
if(j > 0) {
// Find the maximal recyclable stack position
for(; rebuildIndex < degree(); ++rebuildIndex) {
if(!approx_equal(positions[j-1][rebuildIndex], positions[j][rebuildIndex])) {
break;
}
}
for(size_t i = 0; i < degree(); ++i) {
contract(stack[i+1], positions[j][i], stack[i], 1);
}
// Rebuild stack
for(size_t i = rebuildIndex; i < degree(); ++i) {
contract(stack[i+1], positions[j][i], false, stack[i], false, 1);
REQUIRE(stack.back().degree() == 0, "IE");
measuredValues[j] = stack.back()[0];
}
}
///@brief Evaluates every stored rank-one measurement against the TT tensor _solution
/// and writes the results into measuredValues.
/// For each measurement j, the TT cores are contracted from left to right with the
/// corresponding rank-one position tensors positions[j][i], accumulating a boundary
/// tensor in `stack` until only a scalar remains.
void RankOneMeasurementSet::measure(const TTTensor& _solution) {
REQUIRE(_solution.degree() == degree(), "Degrees of solution and measurements must match!");
// stack[i] holds the partial contraction of the first i modes; seeded with the trivial boundary.
std::vector<Tensor> stack(degree()+1);
stack[0] = Tensor::ones({1});
Tensor tmp;
const auto cSize = size();
for(size_t j = 0; j < cSize; ++j) {
for(size_t i = 0; i < degree(); ++i) {
// Contract the running boundary with the i-th TT core, then with the i-th rank-one factor of measurement j.
contract(tmp, stack[i], _solution.get_component(i) , 1);
contract(stack[i+1], positions[j][i], tmp, 1);
}
// Drop the remaining trivial (size-one) dimensions so the final stack entry is a true scalar.
stack.back().reinterpret_dimensions({});
REQUIRE(stack.back().degree() == 0, "IE");
measuredValues[j] = stack.back()[0];
}
}
......@@ -400,27 +424,18 @@ namespace xerus {
stack[0] = _solution;
stack[0].reduce_representation();
Index l, k;
const Index l, k;
const auto cSize = size();
for(size_t j = 0; j < cSize; ++j) {
size_t rebuildIndex = 0;
if(j > 0) {
// Find the maximal recyclable stack position
for(; rebuildIndex < degree(); ++rebuildIndex) {
if(!approx_equal(positions[j-1][rebuildIndex], positions[j][rebuildIndex])) {
break;
}
}
}
// Rebuild stack
for(size_t i = rebuildIndex; i < degree(); ++i) {
for(size_t i = 0; i < degree(); ++i) {
stack[i+1](k&0) = positions[j][i](l) * stack[i](l, k&1);
stack[i+1].reduce_representation();
}
REQUIRE(stack.back().degree() == 0, "IE");
measuredValues[j] = stack.back()[0];
}
}
......@@ -523,24 +538,19 @@ namespace xerus {
using ::xerus::misc::operator<<;
XERUS_REQUIRE(misc::product(_dimensions) >= _numMeasurements, "It's impossible to perform as many measurements as requested. " << _numMeasurements << " > " << _dimensions);
// Create distributions
std::vector<std::uniform_int_distribution<size_t>> indexDist;
for (size_t i = 0; i < _dimensions.size(); ++i) {
indexDist.emplace_back(0, _dimensions[i]-1);
}
std::vector<Tensor> randOnePosition(_dimensions.size());
while (positions.size() < _numMeasurements) {
for (size_t i = 0; i < _dimensions.size(); ++i) {
randOnePosition[i] = Tensor::random({_dimensions[i]});
randOnePosition[i] /= xerus::frob_norm(randOnePosition[i]);
randOnePosition[i].apply_factor();
}
// NOTE Assuming our random generator works, no identical positions should occour.
positions.push_back(randOnePosition);
}
sort(true);
measuredValues.resize(_numMeasurements);
measuredValues.resize(_numMeasurements, 0);
}
......
......@@ -31,77 +31,140 @@
namespace xerus {
void PerformanceData::add(const size_t _itrCount, const xerus::value_t _residual, const std::vector<size_t> _ranks, const size_t _flags) {
///@brief Constructs PerformanceData without an error function; timers start unset (sentinel ~0ul).
PerformanceData::PerformanceData(const bool _printProgress, const bool _active) :
active(_active), printProgress(_printProgress), startTime(~0ul), stopTime(~0ul) {}
///@brief Constructs PerformanceData that additionally evaluates _errorFunction for each recorded data point.
PerformanceData::PerformanceData(const ErrorFunction& _errorFunction, const bool _printProgress, const bool _active) :
active(_active), printProgress(_printProgress), errorFunction(_errorFunction), startTime(~0ul), stopTime(~0ul) {}
void PerformanceData::start() {
if (active) {
if (startTime == ~0ul) {
start();
}
data.emplace_back(_itrCount, get_elapsed_time(), _residual, 0.0, _ranks, _flags);
if(printProgress) {
LOG_SHORT(PerformanceData, "Iteration " << std::setw(4) << std::setfill(' ') << _itrCount
<< " Time: " << std::right << std::setw(6) << std::setfill(' ') << std::fixed << std::setprecision(2) << double(data.back().elapsedTime)*1e-6
<< "s Residual: " << std::setw(11) << std::setfill(' ') << std::scientific << std::setprecision(6) << data.back().residual
<< " Flags: " << _flags << " Ranks: " << _ranks);
std::stringstream ss(additionalInformation);
while (ss) {
std::string line;
std::getline(ss, line);
XERUS_LOG_SHORT(PerformanceData, line);
}
}
startTime = misc::uTime();
}
}
void PerformanceData::add(const size_t _itrCount, const xerus::value_t _residual, const TTTensor& _x, const size_t _flags) {
if(!errorFunction) { add(_itrCount, _residual, _x.ranks(), _flags); return; }
///@brief Pauses the timer by recording the current time; a no-op when tracking is inactive.
void PerformanceData::stop_timer() {
	if (!active) { return; }
	stopTime = misc::uTime();
}
void PerformanceData::continue_timer() {
if (active) {
size_t currtime = misc::uTime();
startTime += currtime - stopTime;
stopTime = ~0ul;
}
}
///@brief Discards all recorded data points and additional information and unsets both timers.
/// No-op when tracking is inactive.
void PerformanceData::reset() {
	if (!active) { return; }
	data.clear();
	additionalInformation.clear();
	startTime = ~0ul;
	stopTime = ~0ul;
}
///@brief Returns the time elapsed since start() (in the units of misc::uTime(),
/// presumably microseconds given the *1e-6 seconds conversion used when printing — TODO confirm).
/// Ignores any pause recorded by stop_timer().
size_t PerformanceData::get_elapsed_time() const {
return misc::uTime() - startTime;
}
///@brief Returns the total runtime since start(): up to stopTime if the timer was
/// stopped, otherwise up to the current time.
size_t PerformanceData::get_runtime() const {
	const size_t endTime = (stopTime != ~0ul) ? stopTime : misc::uTime();
	return endTime - startTime;
}
void PerformanceData::add(const value_t _residual, const TTTensor& _x, const size_t _flags) {
if (data.empty()) {
add(0, std::vector<double>(1, _residual), _x, _flags);
} else {
add(data.back().iteration+1, std::vector<double>(1, _residual), _x, _flags);
}
}
void PerformanceData::add(const std::vector<double>& _residuals, const TTTensor& _x, const size_t _flags) {
if (data.empty()) {
add(0, _residuals, _x, _flags);
} else {
add(data.back().iteration+1, _residuals, _x, _flags);
}
}
///@brief Convenience overload: wraps the single residual into a one-element vector and
/// forwards to the vector-based add().
void PerformanceData::add(const size_t _itrCount, double _residual, const TTTensor& _x, const size_t _flags) {
add(_itrCount, std::vector<double>(1, _residual), _x, _flags);
}
void PerformanceData::add(const size_t _itrCount, const std::vector<double>& _residuals, const TTTensor& _x, const size_t _flags) {
if (active) {
if (startTime == ~0ul) {
start();
}
stop_timer();
data.emplace_back(_itrCount, get_elapsed_time(), _residual, errorFunction(_x), _x.ranks(), _flags);
REQUIRE(!_residuals.empty(), "Need at least one residual");
const double error = errorFunction ? errorFunction(_x) : 0.0;
data.emplace_back(_itrCount, get_elapsed_time(), _residuals, error, _x.degrees_of_freedom(), _flags);
if (printProgress) {
LOG_SHORT(PerformanceData, "Iteration " << std::setw(4) << std::setfill(' ') << _itrCount
<< " Time: " << std::right << std::setw(6) << std::setfill(' ') << std::fixed << std::setprecision(2) << double(data.back().elapsedTime)*1e-6
<< "s Residual: " << std::setw(11) << std::setfill(' ') << std::scientific << std::setprecision(6) << data.back().residual
<< " Error: " << std::setw(11) << std::setfill(' ') << std::scientific << std::setprecision(6) << data.back().error
<< " Flags: " << _flags << " Ranks: " << _x.ranks()); // NOTE using data.back().ranks causes segmentation fault in gcc
if(errorFunction) {
LOG_SHORT(PerformanceData, "Iteration " << std::setw(4) << std::setfill(' ') << _itrCount
<< " Time: " << std::right << std::setw(6) << std::setfill(' ') << std::fixed << std::setprecision(2) << double(data.back().elapsedTime)*1e-6
<< "s Residuals: " << std::setw(11) << std::setfill(' ') << std::scientific << std::setprecision(6) << data.back().residuals
<< " Error: " << std::setw(11) << std::setfill(' ') << std::scientific << std::setprecision(6) << data.back().error
<< " Dofs: " << data.back().dofs << " Flags: " << _flags);
} else {
LOG_SHORT(PerformanceData, "Iteration " << std::setw(4) << std::setfill(' ') << _itrCount
<< " Time: " << std::right << std::setw(6) << std::setfill(' ') << std::fixed << std::setprecision(2) << double(data.back().elapsedTime)*1e-6
<< "s Residuals: " << std::setw(11) << std::setfill(' ') << std::scientific << std::setprecision(6) << data.back().residuals
<< " Dofs: " << data.back().dofs << " Flags: " << _flags);
}
}
continue_timer();
}
}
void PerformanceData::add(const xerus::value_t _residual, const TensorNetwork::RankTuple _ranks, const size_t _flags) {
if (active) {
if (data.empty()) {
add(0, _residual, _ranks, _flags);
} else {
add(data.back().iterationCount+1, _residual, _ranks, _flags);
}
}
}