#include <boost/math/special_functions/hermite.hpp>
#include <boost/math/special_functions/legendre.hpp>

// Tensor randVar_to_position(const double _v, const size_t _polyDegree)
for (unsigned i = 0; i < _polyDegree; ++i) {
    p[i] = boost::math::hermite(i, _v/std::sqrt(2))/std::pow(2.0, i/2.0);
}
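// Note: boost::math::hermite evaluates the physicists' Hermite polynomials H_i.
// The rescaling used above,
//     p_i(v) = H_i(v / sqrt(2)) / 2^(i/2) = He_i(v),
// yields the probabilists' Hermite polynomials He_i, which are orthogonal with
// respect to the standard normal density, i.e. the polynomial chaos basis
// matching the normally distributed samples drawn in uq_mc further below.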
const double solutionsNorm;

const std::vector<std::vector<Tensor>> positions;
const std::vector<Tensor>& solutions;

std::vector<std::vector<Tensor>> rightStack;
std::vector<std::vector<Tensor>> leftIsStack;
std::vector<std::vector<Tensor>> leftOughtStack;
static std::vector<std::vector<Tensor>> create_positions(const TTTensor& _x, const std::vector<std::vector<double>>& _randomVariables) {
    std::vector<std::vector<Tensor>> positions(_x.degree());

    for(size_t corePosition = 1; corePosition < _x.degree(); ++corePosition) {
        positions[corePosition].reserve(_randomVariables.size());
        for(size_t j = 0; j < _randomVariables.size(); ++j) {
            positions[corePosition].push_back(randVar_to_position(_randomVariables[j][corePosition-1], _x.dimensions[corePosition]));
        }
    }

    return positions;
}

// static double calc_solutions_norm(const std::vector<Tensor>& _solutions)
for(const auto& s : _solutions) {
    norm += misc::sqr(frob_norm(s));
}

return std::sqrt(norm);
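// The value computed here is the root of the summed squared Frobenius norms of
// all measured solutions; it is stored as solutionsNorm and presumably serves
// to normalize the residuals tracked in solve() below.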
InternalSolver(TTTensor& _x, const std::vector<std::vector<double>>& _randomVariables, const std::vector<Tensor>& _solutions) :
    N(_randomVariables.size()),
    // ...
    solutions(_solutions),
    // ...
    leftOughtStack(d, std::vector<Tensor>(N))
{
    REQUIRE(_randomVariables.size() == _solutions.size(), "ERROR");
}
// void calc_left_stack(const size_t _corePosition)
REQUIRE(_corePosition+1 < d, "Invalid corePosition");

if(_corePosition == 0) {
    #pragma omp parallel for
    for(size_t j = 0; j < N; ++j) {
        contract(leftOughtStack[_corePosition][j], solutions[j], shuffledX, 1);
    }
} else {
    #pragma omp parallel for firstprivate(measCmp, tmp)
    for(size_t j = 0; j < N; ++j) {
        contract(measCmp, positions[_corePosition][j], shuffledX, 1);

        if(_corePosition > 1) {
            contract(tmp, measCmp, true, leftIsStack[_corePosition-1][j], false, 1);
            contract(leftIsStack[_corePosition][j], tmp, measCmp, 1);
        } else {
            contract(leftIsStack[_corePosition][j], measCmp, true, measCmp, false, 1);
        }

        contract(leftOughtStack[_corePosition][j], leftOughtStack[_corePosition-1][j], measCmp, 1);
    }
}
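// Reading of the loop above: for each sample j, measCmp is the current core
// contracted with that sample's basis vector. leftIsStack[c][j] thus appears to
// accumulate the Gram-type ("is") part measCmp^T * leftIsStack[c-1][j] * measCmp,
// while leftOughtStack[c][j] carries the measured solution projected through the
// cores left of the core position (the "ought" part); both are consumed in
// calculate_delta below.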
// void calc_right_stack(const size_t _corePosition)
REQUIRE(_corePosition > 0 && _corePosition < d, "Invalid corePosition");

if(_corePosition < d-1) {
    #pragma omp parallel for firstprivate(tmp)
    for(size_t j = 0; j < N; ++j) {
        contract(tmp, positions[_corePosition][j], shuffledX, 1);
        contract(rightStack[_corePosition][j], tmp, rightStack[_corePosition+1][j], 1);
    }
} else {
    #pragma omp parallel for
    for(size_t j = 0; j < N; ++j) {
        contract(rightStack[_corePosition][j], positions[_corePosition][j], shuffledX, 1);
    }
}
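// rightStack[c][j] contracts the cores c, ..., d-1 with the basis vectors of
// sample j, leaving only the left rank index of core c; these per-sample
// partial evaluations are reused by calculate_delta and
// calculate_norm_A_projGrad below.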
// Tensor calculate_delta(const size_t _corePosition) const
if(_corePosition > 0) {
    #pragma omp parallel for firstprivate(dyadComp, tmp)
    for(size_t j = 0; j < N; ++j) {
        if(_corePosition < d-1) {
            contract(dyadicPart, positions[_corePosition][j], rightStack[_corePosition+1][j], 0);
        } else {
            dyadicPart = positions[_corePosition][j];
        }

        contract(isPart, positions[_corePosition][j], shuffledX, 1);

        if(_corePosition < d-1) {
            contract(isPart, isPart, rightStack[_corePosition+1][j], 1);
        }

        if(_corePosition > 1) {
            contract(isPart, leftIsStack[_corePosition-1][j], isPart, 1);
        }

        contract(dyadComp, isPart - leftOughtStack[_corePosition-1][j], dyadicPart, 0);

        #pragma omp critical
        { delta += dyadComp; }
    }
} else {
    #pragma omp parallel for firstprivate(dyadComp, tmp)
    for(size_t j = 0; j < N; ++j) {
        contract(dyadComp, shuffledX, rightStack[_corePosition+1][j], 1);
        contract(dyadComp, dyadComp - solutions[j], rightStack[_corePosition+1][j], 0);
        dyadComp.reinterpret_dimensions({1, dyadComp.dimensions[0], dyadComp.dimensions[1]});

        #pragma omp critical
        { delta += dyadComp; }
    }
}
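// The accumulated delta sums, over all samples, the dyadic product of the
// residual-like factor (isPart - leftOughtStack[...]) with dyadicPart, i.e. a
// per-core gradient of the sample-wise least-squares functional; it presumably
// provides the update direction applied during the sweep in solve().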
// double calculate_norm_A_projGrad(const Tensor& _delta, const size_t _corePosition) const
if(_corePosition == 0) {
    #pragma omp parallel for firstprivate(tmp) reduction(+:norm)
    for(size_t j = 0; j < N; ++j) {
        contract(tmp, _delta, rightStack[1][j], 1);
        // ...
    }
} else {
    if(_corePosition == d-1) {
        // ...
    }

    #pragma omp parallel for firstprivate(tmp, rightPart) reduction(+:norm)
    for(size_t j = 0; j < N; ++j) {
        contract(tmp, positions[_corePosition][j], shuffledDelta, 1);

        if(_corePosition < d-1) {
            contract(rightPart, tmp, rightStack[_corePosition+1][j], 1);
        }

        if(_corePosition > 1) {
            contract(tmp, rightPart, leftIsStack[_corePosition-1][j], 1);
        } else {
            contract(tmp, rightPart, rightPart, 1);
        }
        // ...
    }
}

return std::sqrt(norm);
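// This routine applies the same per-sample measurement contractions to the
// proposed update _delta and accumulates the resulting quadratic forms, so the
// returned value is the norm of the projected gradient as seen through the
// measurement operator; presumably it enters the step-size computation in
// solve().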
// double calc_residual_norm(const size_t _corePosition) const
REQUIRE(_corePosition == 0, "Invalid corePosition");

for(size_t j = 0; j < N; ++j) {
    // ...
}

return std::sqrt(norm);
// solve()
std::vector<double> residuals(10, 1000.0);
const size_t maxIterations = 1000;

for(size_t iteration = 0; maxIterations == 0 || iteration < maxIterations; ++iteration) {
    // Rebuild the right stack for the new sweep
    for(size_t corePosition = d-1; corePosition > 0; --corePosition) {
        // ...
    }

    // Sweep from the first to the last core
    for(size_t corePosition = 0; corePosition < x.degree(); ++corePosition) {
        if(corePosition == 0) {
            // ...
            if(residuals.back()/residuals[residuals.size()-10] > 0.99) {
                LOG(ADF, "Residual decrease from " << std::scientific << residuals[10]
                    << " to " << std::scientific << residuals.back()
                    << " in " << residuals.size()-10 << " iterations.");
                // ...
            }
        }
        // ...
        if(corePosition+1 < d) {
            // ...
        }
    }
}
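// Stopping rule in the sweep above: if the newest residual is more than 99% of
// the residual from ten iterations earlier (i.e. less than 1% improvement over
// the last ten sweeps), the overall decrease is logged and, presumably, the
// iteration terminates.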
void uq_adf(TTTensor& _x, const std::vector<std::vector<double>>& _randomVariables, const std::vector<Tensor>& _solutions) {
    InternalSolver solver(_x, _randomVariables, _solutions);
    return solver.solve();
}
// The following fragment operates on a measurement set (_measurments) and builds the TT iterate x
std::vector<std::vector<double>> randomVectors = _measurments.randomVectors;
std::vector<Tensor> solutions = _measurments.solutions;

// Calculate the mean of the measured solutions
for(const auto& sol : solutions) {
    mean += sol;
}
mean /= double(solutions.size());

// Use the mean as the first component of the base term
mean.reinterpret_dimensions({1, x.dimensions[0], 1});
baseTerm.set_component(0, mean);
// ...
baseTerm.assume_core_position(0);
// ...
mean.reinterpret_dimensions({x.dimensions[0]});

// ...
LOG(UQ_ADF, "Pre rounding ranks: " << x.ranks());
// ...
LOG(UQ_ADF, "Post rounding ranks: " << x.ranks());
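// Interpretation: the mean appears to be split off into a separate base term
// (set as component 0, core assumed at position 0), so the TT iterate only has
// to capture the fluctuations around the mean; the two rank logs suggest a
// rounding step of x in between.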
// void add(const std::vector<double>& _rndvec, const Tensor& _solution)
randomVectors.push_back(_rndvec);
solutions.push_back(_solution);

// void add_initial(const std::vector<double>& _rndvec, const Tensor& _solution)
initialRandomVectors.push_back(_rndvec);
initialSolutions.push_back(_solution);
std::pair<std::vector<std::vector<double>>, std::vector<Tensor>> uq_mc(const TTTensor& _x, const size_t _N, const size_t _numSpecial) {
    std::normal_distribution<double> dist(0.0, 1.0);

    std::vector<std::vector<double>> randomVariables;
    std::vector<Tensor> solutions;

    randomVariables.reserve(_N);
    solutions.reserve(_N);
    for(size_t i = 0; i < _N; ++i) {
        randomVariables.push_back(std::vector<double>(_x.degree()-1));
        for(size_t k = _x.degree()-1; k > 0; --k) {
            randomVariables[i][k-1] = (k <= _numSpecial ? 0.3 : 1.0)*dist(rnd);
            // ...
        }
        // ...
        solutions.push_back(p);
    }

    return std::make_pair(randomVariables, solutions);
}
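// A hypothetical round-trip use of uq_mc and uq_adf (not part of this file;
// `reference` is a placeholder name for a given TT tensor): draw synthetic
// measurements from it and fit a fresh iterate to them.
//
//     const auto data = uq_mc(reference, 10000, 0);
//     TTTensor x0 = /* initial guess with the same dimensions as reference */;
//     uq_adf(x0, data.first, data.second);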
// Tensor uq_avg(const TTTensor& _x, const size_t _N, const size_t _numSpecial)
std::normal_distribution<double> dist(0.0, 1.0);

#pragma omp parallel for
for(size_t i = 0; i < _N; ++i) {
    // ...
    for(size_t k = _x.degree()-1; k > 0; --k) {
        // ...
    }
    // ...
}

return realAvg/double(_N);
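// uq_avg appears to estimate the mean of the TT surrogate by plain Monte-Carlo:
// it draws _N normally distributed parameter vectors (presumably with the same
// scaling of the first _numSpecial components as in uq_mc), evaluates _x at
// each of them and returns the average realAvg/_N.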