Commit 7b191c54 authored by Michael Goette's avatar Michael Goette

updated some doxygen tags which created warnings when compiling

parent fb8c43c8
Pipeline #1071 failed with stages
in 13 minutes and 20 seconds
......@@ -85,7 +85,7 @@ namespace xerus {
* @param _A operator to solve for
* @param[in,out] _x in: initial guess, out: solution as found by the algorithm
* @param _b right-hand side of the equation to be solved
* @param _numHalfSweeps maximum number of half-sweeps to perform
* @param _numSteps maximum number of half-sweeps to perform
* @param _perfData vector of performance data (residuals after every microiteration)
* @returns the residual @f$|Ax-b|@f$ of the final @a _x
*/
......@@ -121,7 +121,7 @@ namespace xerus {
* call to minimize @f$ \|x - b\|^2 @f$ for @f$ x @f$
* @param[in,out] _x in: initial guess, out: solution as found by the algorithm
* @param _b right-hand side of the equation to be solved
* @param _numHalfSweeps maximum number of half-sweeps to perform
* @param _numSteps maximum number of half-sweeps to perform
* @param _perfData vector of performance data (residuals after every microiteration)
* @returns the residual @f$|x-b|@f$ of the final @a _x
*/
......
......@@ -30,6 +30,7 @@ namespace xerus {
* @brief Finds the position of the approximately largest entry.
* @details Finds an entry that is at least of size @a _accuracy * X_max in absolute value,
* where X_max is the largest entry of the tensor. The smaller @a _accuracy, the faster the algorithm will work.
* @param _T TTNetwork to check for largest entry
* @param _accuracy factor that determines the maximal deviation of the returned entry from the true largest entry.
* @param _lowerBound a lower bound for the largest entry, i.e. there must be an entry in the tensor which is at least of
* this size (in absolute value). The algorithm may fail completely if this is not fulfilled, but will work using its own
......
......@@ -85,7 +85,7 @@ namespace xerus {
* @param _A operator to solve for
* @param[in,out] _x in: initial guess, out: solution as found by the algorithm
* @param _b right-hand side of the equation to be solved
* @param _numHalfSweeps maximum number of half-sweeps to perform
* @param _numSteps maximum number of half-sweeps to perform
* @param _perfData vector of performance data (residuals after every microiteration)
* @returns the residual @f$|Ax-b|@f$ of the final @a _x
*/
......@@ -121,7 +121,7 @@ namespace xerus {
* call to minimize @f$ \|x - b\|^2 @f$ for @f$ x @f$
* @param[in,out] _x in: initial guess, out: solution as found by the algorithm
* @param _b right-hand side of the equation to be solved
* @param _numHalfSweeps maximum number of half-sweeps to perform
* @param _numSteps maximum number of half-sweeps to perform
* @param _perfData vector of performance data (residuals after every microiteration)
* @returns the residual @f$|x-b|@f$ of the final @a _x
*/
......
......@@ -298,7 +298,7 @@ namespace xerus {
/*- - - - - - - - - - - - - - - - - - - - - - - - - - Miscellaneous - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
public:
/**
/*
* @brief Reduces the given ranks to the maximal possible.
* @details If a given rank is already smaller or equal it is left unchanged.
* @param _ranks the initial ranks to be reduced.
......@@ -343,7 +343,6 @@ namespace xerus {
* change.
* @param _idx index of the component to set.
* @param _T Tensor to use as the new component tensor.
* @param _isLeave true if the component being set is a leaf, false if it is an inner component
*/
void set_component(const size_t _idx, Tensor _T);
......@@ -566,6 +565,7 @@ namespace xerus {
/**
* @brief Calculates the entrywise division of this HTNetwork by a constant @a _divisor.
* @details Internally this only results in a change in the global factor.
* @param _network the HTNetwork
* @param _divisor the divisor.
* @return the resulting scaled HTNetwork.
*/
......
......@@ -233,9 +233,9 @@ namespace xerus {
/**
* @brief Constructs a dense Tensor with the given dimensions and uses the given random generator and distribution to assign the values to the entries.
* @details The entries are assigned in the order they are stored (i.e. row-major order). Each assignment is a separate call to the random distribution.
* @param _dimensions the future dimensions of the Tensor.
* @param _dimensions1 the future dimensions of the Tensor. Defines the matrification with respect to which the Tensor is orthogonal.
* @param _dimensions2 the future dimensions of the Tensor.
* @param _rnd the random generator to be used.
* @param _dist the random distribution to be used.
*/
template<class generator=std::mt19937_64>
static Tensor XERUS_warn_unused random_orthogonal(DimensionTuple _dimensions1, DimensionTuple _dimensions2, generator& _rnd=xerus::misc::randomEngine) {
......@@ -378,7 +378,7 @@ namespace xerus {
/**
* @brief Assigns the given TensorNetwork to this Tensor by completely contracting the network.
* @param _other the TensorNetwork to be assigned to this Tensor.
* @param _network the TensorNetwork to be assigned to this Tensor.
* @return a reference to this Tensor.
*/
Tensor& operator=(const TensorNetwork& _network);
......@@ -944,6 +944,8 @@ namespace xerus {
* @param _Vt Output Tensor for the resulting Vt.
* @param _input input Tensor of which the SVD shall be calculated.
* @param _splitPos index position defining the matrification for which the SVD is calculated.
* @param _maxRank maximal rank to be kept by the SVD
* @param _eps all singular values smaller than _eps are cut off
*/
value_t calculate_svd(Tensor& _U, Tensor& _S, Tensor& _Vt, Tensor _input, const size_t _splitPos, const size_t _maxRank, const value_t _eps);
......
......@@ -321,7 +321,7 @@ namespace xerus {
/**
* @brief Indexes the TensorNetwork for read/write use.
* @param _args several [indices](@ref Index) determining the desired index order.
* @param _indices several [indices](@ref Index) determining the desired index order.
* @return an internal representation of an IndexedTensor(Network).
*/
internal::IndexedTensor<TensorNetwork> operator()(const std::vector<Index> & _indices);
......@@ -329,15 +329,15 @@ namespace xerus {
/**
* @brief Indexes the TensorNetwork for read/write use.
* @param _args several [indices](@ref Index) determining the desired index order.
* @param _indices several [indices](@ref Index) determining the desired index order.
* @return an internal representation of an IndexedTensor(Network).
*/
internal::IndexedTensor<TensorNetwork> operator()( std::vector<Index>&& _indices);
internal::IndexedTensor<TensorNetwork> operator()(std::vector<Index>&& _indices);
/**
* @brief Indexes the TensorNetwork for read only use.
* @param _args several [indices](@ref Index) determining the desired index order.
* @param _indices several [indices](@ref Index) determining the desired index order.
* @return an internal representation of an IndexedTensor(Network).
*/
internal::IndexedTensorReadOnly<TensorNetwork> operator()(const std::vector<Index> & _indices) const;
......@@ -345,10 +345,10 @@ namespace xerus {
/**
* @brief Indexes the TensorNetwork for read only use.
* @param _args several [indices](@ref Index) determining the desired index order.
* @param _indices several [indices](@ref Index) determining the desired index order.
* @return an internal representation of an IndexedTensor(Network).
*/
internal::IndexedTensorReadOnly<TensorNetwork> operator()( std::vector<Index>&& _indices) const;
internal::IndexedTensorReadOnly<TensorNetwork> operator()(std::vector<Index>&& _indices) const;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - Operator specializations - - - - - - - - - - - - - - - - - - - - - - - - - - */
......
......@@ -108,7 +108,7 @@ namespace xerus {
explicit TTNetwork(const Tensor& _tensor, const double _eps, const RankTuple& _maxRanks);
/**
/*
* @brief Transforms a given TensorNetwork to a TTNetwork.
* @details This is not yet implemented differently from casting to Tensor and then using a HOSVD.
* @param _network The network to transform.
......@@ -573,6 +573,7 @@ namespace xerus {
/**
* @brief Calculates the entrywise division of this TTNetwork by a constant @a _divisor.
* @details Internally this only results in a change in the global factor.
* @param _network the TTNetwork
* @param _divisor the divisor.
* @return the resulting scaled TTNetwork.
*/
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment