template<class MeasurmentSet>
for(const value_t measurement : _measurments.measuredValues) {
    normMeasuredValues += misc::sqr(measurement);
}
return std::sqrt(normMeasuredValues);
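The fragment above accumulates the squared measured values and returns the square root, i.e. the two-norm of the measurement vector that is later used to normalise the residual. A minimal standalone sketch of the same computation, using a plain std::vector and a hypothetical helper name rather than the class member above:

#include <cmath>
#include <vector>

// Two-norm of a vector of measured values; mirrors the accumulation above.
double norm_of_measured_values(const std::vector<double>& values) {
    double normSqr = 0.0;
    for (const double v : values) {
        normSqr += v * v;          // misc::sqr(v) in the listing
    }
    return std::sqrt(normSqr);
}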
template<class MeasurmentSet>
const MeasurmentSet& measurments;

// Forward comparison: lexicographic from the first mode.
for(size_t j = 0; j < degree; ++j) {
    if(measurments.positions[_a][j] < measurments.positions[_b][j]) { return true; }
    if(measurments.positions[_a][j] > measurments.positions[_b][j]) { return false; }
}

// Backward comparison: lexicographic from the last mode.
for(size_t j = degree; j > 0; --j) {
    if(measurments.positions[_a][j-1] < measurments.positions[_b][j-1]) { return true; }
    if(measurments.positions[_a][j-1] > measurments.positions[_b][j-1]) { return false; }
}

// Equal positions would mean a duplicate measurement.
LOG(fatal, "Measurments must not appear twice.");
for(size_t j = 0; j < degree; ++j) {
    const int res = internal::comp(measurments.positions[_a][j], measurments.positions[_b][j]);
    if(res == -1) { return true; }
    if(res == 1) { return false; }
}

for(size_t j = degree; j > 0; --j) {
    const int res = internal::comp(measurments.positions[_a][j-1], measurments.positions[_b][j-1]);
    if(res == -1) { return true; }
    if(res == 1) { return false; }
}

LOG(fatal, "Measurments must not appear twice.");
template<class MeasurmentSet>
Tensor** const stack(_stackMem.get()+numMeasurments);

std::vector<size_t> calculationMap(degree*numMeasurments);

size_t numUniqueStackEntries = 0;

perfData << "Start sorting";
std::vector<size_t> reorderedMeasurments(numMeasurments);
std::iota(reorderedMeasurments.begin(), reorderedMeasurments.end(), 0);
perfData << "End sorting " << _forward;
for(size_t corePosition = 0; corePosition < degree; ++corePosition) {
    const size_t realId = reorderedMeasurments[0];
    calculationMap[realId + corePosition*numMeasurments] = realId;
    ++numUniqueStackEntries;
}
for(size_t i = 1; i < numMeasurments; ++i) {
    const size_t realId = reorderedMeasurments[i];
    const size_t realPreviousId = reorderedMeasurments[i-1];

    size_t position = 0;
    size_t corePosition = _forward ? position : degree-1-position;

    for( ; position < degree && approx_equal(measurments.positions[realId][corePosition], measurments.positions[realPreviousId][corePosition]);
            ++position, corePosition = _forward ? position : degree-1-position) {
        if( realPreviousId < realId ) {
            calculationMap[realId + corePosition*numMeasurments] = calculationMap[realPreviousId + corePosition*numMeasurments];
        } else if(realPreviousId == calculationMap[realPreviousId + corePosition*numMeasurments]) {
            calculationMap[realPreviousId + corePosition*numMeasurments] = realId;
            calculationMap[realId + corePosition*numMeasurments] = realId;
        } else if( realId < calculationMap[realPreviousId + corePosition*numMeasurments]) {
            const size_t nextOther = calculationMap[realPreviousId + corePosition*numMeasurments];
            INTERNAL_CHECK(calculationMap[nextOther + corePosition*numMeasurments] == nextOther, "IE");
            calculationMap[realPreviousId + corePosition*numMeasurments] = realId;
            calculationMap[nextOther + corePosition*numMeasurments] = realId;
            calculationMap[realId + corePosition*numMeasurments] = realId;
        } else {
            calculationMap[realId + corePosition*numMeasurments] = calculationMap[realPreviousId + corePosition*numMeasurments];
        }
    }

    for( ; position < degree; ++position, corePosition = _forward ? position : degree-1-position) {
        calculationMap[realId + corePosition*numMeasurments] = realId;
        ++numUniqueStackEntries;
    }
}
numUniqueStackEntries++;
_stackSaveSlot.reset(new Tensor[numUniqueStackEntries]);
size_t usedSlots = 0;
for(size_t i = 0; i < numMeasurments; ++i) {
    stack[i - 1*numMeasurments] = &_stackSaveSlot[0];
    stack[i + degree*numMeasurments] = &_stackSaveSlot[0];
}
for(size_t corePosition = 0; corePosition < degree; ++corePosition) {
    for(size_t i = 0; i < numMeasurments; ++i) {
        if(calculationMap[i + corePosition*numMeasurments] == i) {
            _updates[corePosition].emplace_back(i);
            stack[i + corePosition*numMeasurments] = &_stackSaveSlot[usedSlots];
            ++usedSlots;
        } else {
            stack[i + corePosition*numMeasurments] = stack[calculationMap[i + corePosition*numMeasurments] + corePosition*numMeasurments];
        }
    }
}
INTERNAL_CHECK(usedSlots == numUniqueStackEntries, "Internal Error.");
perfData << "We have " << numUniqueStackEntries << " unique stack entries. There are " << numMeasurments*degree+1 << " virtual stack entries.";
template<class MeasurmentSet>
#pragma omp parallel for schedule(static)
for(size_t corePosition = 0; corePosition < degree; ++corePosition) {
    for(const size_t i : forwardUpdates[corePosition]) {
        // ...
    }
    for(const size_t i : backwardUpdates[corePosition]) {
        // ...
    }
}
template<class MeasurmentSet>
std::vector<Tensor> fixedComponents(_component.dimensions[1]);

for(size_t i = 0; i < _component.dimensions[1]; ++i) {
    fixedComponents[i](r1, r2) = _component(r1, i, r2);
}

return fixedComponents;
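get_fixed_components splits a third-order component tensor into one matrix per value of its middle (physical) index, i.e. fixedComponents[i](r1, r2) = _component(r1, i, r2). The same operation on a plain row-major array, as a hypothetical standalone sketch:

#include <cstddef>
#include <vector>

// Slice a (r1 x d x r2) row-major array into d matrices of size r1 x r2.
std::vector<std::vector<double>> fix_middle_mode(const std::vector<double>& component,
                                                 size_t r1, size_t d, size_t r2) {
    std::vector<std::vector<double>> slices(d, std::vector<double>(r1 * r2));
    for (size_t i = 0; i < d; ++i) {
        for (size_t a = 0; a < r1; ++a) {
            for (size_t b = 0; b < r2; ++b) {
                slices[i][a*r2 + b] = component[(a*d + i)*r2 + b];
            }
        }
    }
    return slices;
}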
const size_t numUpdates = backwardUpdates[_corePosition].size();

std::vector<Tensor> fixedComponents = get_fixed_components(_currentComponent);

#pragma omp parallel for schedule(static)
for(size_t u = 0; u < numUpdates; ++u) {
    const size_t i = backwardUpdates[_corePosition][u];
    contract(*backwardStack[i + _corePosition*numMeasurments], fixedComponents[measurments.positions[i][_corePosition]], false, *backwardStack[i + (_corePosition+1)*numMeasurments], false, 1);
}
const size_t numUpdates = backwardUpdates[_corePosition].size();

Tensor reshuffledComponent;
reshuffledComponent(i1, r1, r2) = _currentComponent(r1, i1, r2);

#pragma omp parallel for firstprivate(mixedComponent) schedule(static)
for(size_t u = 0; u < numUpdates; ++u) {
    const size_t i = backwardUpdates[_corePosition][u];
    contract(mixedComponent, measurments.positions[i][_corePosition], false, reshuffledComponent, false, 1);
    contract(*backwardStack[i + _corePosition*numMeasurments], mixedComponent, false, *backwardStack[i + (_corePosition+1)*numMeasurments], false, 1);
}
const size_t numUpdates = forwardUpdates[_corePosition].size();

const std::vector<Tensor> fixedComponents = get_fixed_components(_currentComponent);

#pragma omp parallel for schedule(static)
for(size_t u = 0; u < numUpdates; ++u) {
    const size_t i = forwardUpdates[_corePosition][u];
    contract(*forwardStack[i + _corePosition*numMeasurments], *forwardStack[i + (_corePosition-1)*numMeasurments], false, fixedComponents[measurments.positions[i][_corePosition]], false, 1);
}
const size_t numUpdates = forwardUpdates[_corePosition].size();

Tensor reshuffledComponent;
reshuffledComponent(i1, r1, r2) = _currentComponent(r1, i1, r2);

#pragma omp parallel for firstprivate(mixedComponent) schedule(static)
for(size_t u = 0; u < numUpdates; ++u) {
    const size_t i = forwardUpdates[_corePosition][u];
    contract(mixedComponent, measurments.positions[i][_corePosition], false, reshuffledComponent, false, 1);
    contract(*forwardStack[i + _corePosition*numMeasurments], *forwardStack[i + (_corePosition-1)*numMeasurments], false, mixedComponent, false, 1);
}
template<class MeasurmentSet>
if(forwardUpdates[_corePosition].size() < backwardUpdates[_corePosition].size()) {
    update_forward_stack(_corePosition, x.get_component(_corePosition));

    #pragma omp parallel for firstprivate(currentValue) schedule(static)
    for(size_t i = 0; i < numMeasurments; ++i) {
        contract(currentValue, *forwardStack[i + _corePosition*numMeasurments], false, *backwardStack[i + (_corePosition+1)*numMeasurments], false, 1);
        residual[i] = (measurments.measuredValues[i]-currentValue[0]);
    }
} else {
    update_backward_stack(_corePosition, x.get_component(_corePosition));

    #pragma omp parallel for firstprivate(currentValue) schedule(static)
    for(size_t i = 0; i < numMeasurments; ++i) {
        contract(currentValue, *forwardStack[i + (_corePosition-1)*numMeasurments], false, *backwardStack[i + _corePosition*numMeasurments], false, 1);
        residual[i] = (measurments.measuredValues[i]-currentValue[0]);
    }
}
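Each residual entry is the measured value minus the TT tensor evaluated at the measured position; the forward and backward stacks cache the left and right partial products of the component slices, so only one contraction per measurement remains. A hedged sketch of the uncached evaluation, using the same xerus calls that appear in the listing (Tensor, contract, fixed indices in indexed expressions); the helper name and the rank-one boundary tensor are illustrative assumptions:

#include <xerus.h>

// Evaluate a TTTensor at a single multi-index by chaining the slices
// component_p(:, position[p], :) from left to right.
double evaluate_at(const xerus::TTTensor& x, const std::vector<size_t>& position) {
    xerus::Index r1, r2;
    xerus::Tensor left = xerus::Tensor::ones({1});                 // virtual boundary rank of 1
    for (size_t p = 0; p < x.degree(); ++p) {
        xerus::Tensor slice;
        slice(r1, r2) = x.get_component(p)(r1, position[p], r2);   // fix the physical index
        xerus::Tensor next;
        xerus::contract(next, left, false, slice, false, 1);       // contract the shared rank mode
        left = next;
    }
    return left[0];                                                // 1x1 result: the entry x[position]
}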
template<> template<>
    const size_t _localRightRank,
    const value_t* const _rightPtr,
    const size_t& _position,

value_t* const shiftedDeltaPtr = _deltaPtr + _position*_localLeftRank*_localRightRank;

for(size_t k = 0; k < _localLeftRank; ++k) {
    for(size_t j = 0; j < _localRightRank; ++j) {
        shiftedDeltaPtr[k*_localRightRank+j] += _residual * _leftPtr[k] * _rightPtr[j];
    }
}
template<> template<>
    const size_t _localRightRank,
    const value_t* const _rightPtr,

for(size_t k = 0; k < _localLeftRank; ++k) {
    for(size_t j = 0; j < _localRightRank; ++j) {
        _scratchSpace[k*_localRightRank+j] = _leftPtr[k] * _rightPtr[j];
    }
}

for(size_t n = 0; n < _position.size; ++n) {
    misc::add_scaled(_deltaPtr + n*_localLeftRank*_localRightRank,
        _position[n]*_residual,
        _scratchSpace,
        _localLeftRank*_localRightRank
    );
}
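Both specializations above accumulate a scaled outer (dyadic) product of the left and right stack vectors into one slice of the gradient component. A minimal standalone sketch of that update (hypothetical function name, plain pointers):

#include <cstddef>

// delta(k, j) += residual * left[k] * right[j], with delta stored row-major.
void add_scaled_dyadic_product(double* delta, const double* left, const double* right,
                               size_t leftRank, size_t rightRank, double residual) {
    for (size_t k = 0; k < leftRank; ++k) {
        for (size_t j = 0; j < rightRank; ++j) {
            delta[k*rightRank + j] += residual * left[k] * right[j];
        }
    }
}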
template<class MeasurmentSet>
const size_t localLeftRank = x.get_component(_corePosition).dimensions[0];
const size_t localRightRank = x.get_component(_corePosition).dimensions[2];

projectedGradientComponent.reset({x.dimensions[_corePosition], localLeftRank, localRightRank});

std::unique_ptr<value_t[]> dyadicComponent(std::is_same<MeasurmentSet, RankOneMeasurementSet>::value ? new value_t[localLeftRank*localRightRank] : nullptr);

#pragma omp for schedule(static)
for(size_t i = 0; i < numMeasurments; ++i) {
    INTERNAL_CHECK(!forwardStack[i + (_corePosition-1)*numMeasurments]->has_factor() && !backwardStack[i + (_corePosition+1)*numMeasurments]->has_factor(), "IE");

    perform_dyadic_product(localLeftRank,
        localRightRank,
        forwardStack[i + (_corePosition-1)*numMeasurments]->get_dense_data(),
        backwardStack[i + (_corePosition+1)*numMeasurments]->get_dense_data(),
        partialProjGradComp.get_unsanitized_dense_data(),
        residual[i],
        measurments.positions[i][_corePosition],
        dyadicComponent.get()
    );
}

projectedGradientComponent += partialProjGradComp;

projectedGradientComponent(r1, i1, r2) = projectedGradientComponent(i1, r1, r2);
template<class MeasurmentSet>
inline size_t position_or_zero(const MeasurmentSet& _measurments, const size_t _meas, const size_t _corePosition);

return _measurments.positions[_meas][_corePosition];
template<class MeasurmentSet>
std::vector<value_t> normAProjGrad(x.dimensions[_corePosition], 0.0);

if(forwardUpdates[_corePosition].size() < backwardUpdates[_corePosition].size()) {
    update_forward_stack(_corePosition, projectedGradientComponent);

    #pragma omp parallel firstprivate(currentValue)
    {
        std::vector<value_t> partialNormAProjGrad(x.dimensions[_corePosition], 0.0);

        #pragma omp for schedule(static)
        for(size_t i = 0; i < numMeasurments; ++i) {
            contract(currentValue, *forwardStack[i + _corePosition*numMeasurments], false, *backwardStack[i + (_corePosition+1)*numMeasurments], false, 1);
            // ...
        }

        for(size_t i = 0; i < normAProjGrad.size(); ++i) {
            normAProjGrad[i] += partialNormAProjGrad[i];
        }
    }
} else {
    update_backward_stack(_corePosition, projectedGradientComponent);

    #pragma omp parallel firstprivate(currentValue)
    {
        std::vector<value_t> partialNormAProjGrad(x.dimensions[_corePosition], 0.0);

        #pragma omp for schedule(static)
        for(size_t i = 0; i < numMeasurments; ++i) {
            contract(currentValue, *forwardStack[i + (_corePosition-1)*numMeasurments], false, *backwardStack[i + _corePosition*numMeasurments], false, 1);
            // ...
        }

        for(size_t i = 0; i < normAProjGrad.size(); ++i) {
            normAProjGrad[i] += partialNormAProjGrad[i];
        }
    }
}

return normAProjGrad;
for(size_t j = 0; j < x.dimensions[_corePosition]; ++j) {
    localDelta(r1, r2) = projectedGradientComponent(r1, j, r2);

    x.component(_corePosition)(r1, i1, r2) = x.component(_corePosition)(r1, i1, r2) + (PyR/_normAProjGrad[j])*Tensor::dirac({x.dimensions[_corePosition]}, j)(i1)*localDelta(r1, r2);
}

x.component(_corePosition)(r1, i1, r2) = x.component(_corePosition)(r1, i1, r2) + (PyR/misc::sum(_normAProjGrad))*projectedGradientComponent(r1, i1, r2);
template<class MeasurmentSet>
double resDec1 = 0.0, resDec2 = 0.0, resDec3 = 0.0;

for(; maxIterations == 0 || iteration < maxIterations; ++iteration) {

    x.move_core(0, true);

    for(size_t corePosition = x.degree()-1; corePosition > 0; --corePosition) {
        update_backward_stack(corePosition, x.get_component(corePosition));
    }

    calculate_residual(0);

    lastResidualNorm = residualNorm;
    double residualNormSqr = 0;

    #pragma omp parallel for schedule(static) reduction(+:residualNormSqr)
    for(size_t i = 0; i < numMeasurments; ++i) {
        residualNormSqr += misc::sqr(residual[i]);
    }
    residualNorm = std::sqrt(residualNormSqr)/normMeasuredValues;

    perfData.add(iteration, residualNorm, x, 0);

    double resDec4 = resDec3; resDec3 = resDec2; resDec2 = resDec1;
    resDec1 = residualNorm/lastResidualNorm;

    if(residualNorm < targetResidualNorm || resDec1*resDec2*resDec3*resDec4 > misc::pow(minimalResidualNormDecrease, 4)) { break; }

    for(size_t corePosition = 0; corePosition < degree; ++corePosition) {
        if(corePosition > 0) {
            calculate_residual(corePosition);
        }

        calculate_projected_gradient(corePosition);

        const std::vector<value_t> normAProjGrad = calculate_slicewise_norm_A_projGrad(corePosition);

        update_x(normAProjGrad, corePosition);

        if(corePosition+1 < degree) {
            x.move_core(corePosition+1, true);
            update_forward_stack(corePosition, x.get_component(corePosition));
        }
    }
}
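The sweep above stops either when the relative residual drops below targetResidualNorm or when the product of the last four per-iteration decrease factors exceeds minimalResidualNormDecrease^4, i.e. the residual is no longer shrinking fast enough on average. A small sketch of that test in isolation (hypothetical helper, same quantities as above):

#include <cmath>

// resDec[k] = residualNorm / lastResidualNorm for each of the last four iterations.
bool adf_should_stop(double residualNorm, double targetResidualNorm,
                     const double resDec[4], double minimalResidualNormDecrease) {
    const double combinedDecrease = resDec[0] * resDec[1] * resDec[2] * resDec[3];
    return residualNorm < targetResidualNorm
        || combinedDecrease > std::pow(minimalResidualNormDecrease, 4);
}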
template<class MeasurmentSet>

for(size_t i = 0; i < _measurments.size(); ++i) {
    // ...
}

for(size_t i = 0; i < _measurments.size(); ++i) {
    for(size_t j = 0; j < _measurments.degree(); ++j) {
        _norms[i] *= _measurments.positions[i][j].frob_norm();
    }
}
template<class MeasurmentSet>

#pragma omp parallel sections
{
    #pragma omp section
    construct_stacks(forwardStackSaveSlots, forwardUpdates, forwardStackMem, true);

    #pragma omp section
    construct_stacks(backwardStackSaveSlots, backwardUpdates, backwardStackMem, false);
}

x.canonicalize_left();

resize_stack_tensors();

solve_with_current_ranks();

while(residualNorm > targetResidualNorm && x.ranks() != maxRanks && (maxIterations == 0 || iteration < maxIterations)) {

    x.move_core(0, true);
    const auto rndTensor = TTTensor::random(x.dimensions, std::vector<size_t>(x.degree()-1, 1));
    // ...

    resize_stack_tensors();

    solve_with_current_ranks();
}
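For completeness, a hedged sketch of how this solver is typically driven from user code. It assumes the ADFVariant call operator and the SinglePointMeasurementSet::add interface; check the adf and measurement headers for the exact overloads before relying on them:

#include <iostream>
#include <xerus.h>

int main() {
    // Assumed example sizes: a 6th-order tensor with mode dimension 4, initial TT ranks 2.
    const std::vector<size_t> dims(6, 4);
    xerus::TTTensor x = xerus::TTTensor::random(dims, std::vector<size_t>(dims.size()-1, 2));

    xerus::SinglePointMeasurementSet measurements;            // assumed default-constructible
    measurements.add({0, 1, 2, 3, 0, 1}, 0.42);               // assumed add(position, value) interface
    // ... add enough further measurements for a meaningful reconstruction ...

    const double relResidual = xerus::ADF(x, measurements);   // assumed ADFVariant call operator
    std::cout << "final relative residual: " << relResidual << std::endl;
    return 0;
}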
std::vector< Tensor > get_fixed_components(const Tensor &_component)
Returns a vector of tensors containing the slices of _component where the second dimension is fixed...
void update_backward_stack(const size_t _corePosition, const Tensor &_currentComponent)
For each measurement, sets the backwardStack at the given _corePosition to the contraction between the ...
void update_forward_stack(const size_t _corePosition, const Tensor &_currentComponent)
For each measurement, sets the forwardStack at the given _corePosition to the contraction between the f...
Header file for the IndexedTensorMoveable class.
void construct_stacks(std::unique_ptr< xerus::Tensor[] > &_stackSaveSlot, std::vector< std::vector< size_t > > &_updates, const std::unique_ptr< Tensor *[]> &_stackMem, const bool _forward)
Constructs either the forward or backward stack. That is, it determines the groups of partially equa...
Class used to represent single point measurements.
size_t size
Size of the Tensor – always equal to the product of the dimensions.
DimensionTuple dimensions
Vector containing the individual dimensions of the tensor.
void solve_with_current_ranks()
Basically the complete algorithm, trying to reconstruct x using its current ranks.
void calculate_projected_gradient(const size_t _corePosition)
Calculates the component at _corePosition of the projected gradient from the residual, i.e. E(A^T(b-Ax)).
bool approx_equal(T _a, T _b, T _eps=4 *std::numeric_limits< T >::epsilon()) noexcept
Checks whether the relative difference between _a and _b (i.e. |a-b|/(|a|/2+|b|/2)) is smaller than...
void perform_dyadic_product(const size_t _localLeftRank, const size_t _localRightRank, const value_t *const _leftPtr, const value_t *const _rightPtr, value_t *const _deltaPtr, const value_t _residual, const PositionType &_position, value_t *const _scratchSpace)
Calculates one internal step of calculate_projected_gradient. In particular the dyadic product of the...
The main namespace of xerus.
Class that handles simple (non-decomposed) tensors in a dense or sparse representation.
double solve()
Tries to solve the reconstruction problem with the current settings.
static Tensor XERUS_warn_unused ones(DimensionTuple _dimensions)
Returns a Tensor with all entries equal to one.
Wrapper class for all ADF variants.
const ADFVariant ADF
Default variant of the ADF algorithm.
Header file for the ADF algorithm and its variants.
static Tensor XERUS_warn_unused dirac(DimensionTuple _dimensions, const MultiIndex &_position)
Returns a Tensor with a single entry equal to one and all others zero.
void contract(Tensor &_result, const Tensor &_lhs, const bool _lhsTrans, const Tensor &_rhs, const bool _rhsTrans, const size_t _numModes)
Low-level contraction between Tensors (see the usage sketch after this list).
std::vector< value_t > calculate_slicewise_norm_A_projGrad(const size_t _corePosition)
Calculates ||P_n(A(E(A^T(b-Ax))))|| = ||P_n(A(E(A^T(residual))))|| = ||P_n(A(E(gradient)))|| f...
void calc_measurment_norm< SinglePointMeasurementSet >(double *_norms, const SinglePointMeasurementSet &_measurments)
MeasurmentComparator(const MeasurmentSet &_measurments, const bool _forward)
void calculate_residual(const size_t _corePosition)
(Re-)Calculates the current residual, i.e. Ax-b.
static TTNetwork XERUS_warn_unused random(std::vector< size_t > _dimensions, const std::vector< size_t > &_ranks, distribution &_dist=xerus::misc::defaultNormalDistribution, generator &_rnd=xerus::misc::randomEngine)
Constructs a TTNetwork with the given dimensions and ranks, with randomly drawn component entries.
int comp(const Tensor &_a, const Tensor &_b)
Header file for the low level array support functions.
Header file for comfort functions and macros that should not be exported in the library.
static XERUS_force_inline value_t frob_norm(const Tensor &_tensor)
Calculates the Frobenius norm of the given tensor.
constexpr T sqr(const T &_a) noexcept
Calculates _a*_a.
double value_t
The type of values to be used by xerus.
void resize_stack_tensors()
Resizes the unique stack tensors to correspond to the current ranks of x.
size_t position_or_zero< RankOneMeasurementSet >(const RankOneMeasurementSet &, const size_t, const size_t)
void calc_measurment_norm< RankOneMeasurementSet >(double *_norms, const RankOneMeasurementSet &_measurments)
bool approx_equal(const xerus::Tensor &_a, const xerus::Tensor &_b, const xerus::value_t _eps=EPSILON)
Checks whether two tensors are approximately equal.
static double calculate_norm_of_measured_values(const MeasurmentSet &_measurments)
Calculates the two-norm of the measured values.
void calc_measurment_norm(double *_norms, const MeasurmentSet &_measurments)
void add_scaled(T *const __restrict _x, const T _alpha, const T *const __restrict _y, const size_t _n) noexcept
Adds _n entries of _y, scaled by _alpha, to those of _x, i.e. x += alpha*y.
bool operator()(const size_t _a, const size_t _b) const
size_t position_or_zero< SinglePointMeasurementSet >(const SinglePointMeasurementSet &_measurments, const size_t _meas, const size_t _corePosition)
void update_x(const std::vector< value_t > &_normAProjGrad, const size_t _corePosition)
Updates the current solution x. For SinglePointMeasurements this is done for each slice separately...
constexpr T pow(const T &_base, const uint32 _exp) noexcept
Calculates _base^_exp by binary exponentiation.
size_t position_or_zero(const MeasurmentSet &_measurments, const size_t _meas, const size_t _corePosition)
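As referenced above, contract() follows a fixed convention for which modes meet: the last _numModes modes of _lhs are contracted with the first _numModes modes of _rhs (after optional transposition). A small hedged usage sketch, assuming Tensor::random and the signature listed above:

#include <xerus.h>

int main() {
    xerus::Tensor A = xerus::Tensor::random({3, 4});
    xerus::Tensor B = xerus::Tensor::random({4, 5});
    xerus::Tensor C;
    // Contract one mode: C(i,k) = sum_j A(i,j) * B(j,k), so C has dimensions {3, 5}.
    xerus::contract(C, A, false, B, false, 1);
    return C.dimensions[0] == 3 && C.dimensions[1] == 5 ? 0 : 1;
}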