// lapack_solver: after the dense local solve, split the multi-site solution x
// back into its components. Forward sweep: split off left factors by SVD
// (elided lines), absorbing the singular values into the remainder.
for (size_t p = 0; p+1 < _data.ALS.sites; ++p) {
	// ...
	x(j,l&1) = S(j,k) * x(k,l&1);
	// ...
}
_x.back() = std::move(x);
// Backward sweep: split off right factors instead.
for (size_t p = _data.ALS.sites-1; p > 0; --p) {
	// ...
	_x[p] = std::move(Vt);
	x(i&1,k) = x(i&1,j) * S(j,k);
	// ...
}
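The two loops above implement the usual re-splitting of a multi-site solution: after the dense local solve, x is cut by successive SVDs (computed in the elided lines) and the singular values travel with the sweep. As a sketch, assuming U, S, Vt denote the SVD factors of the current cut:

\[
x = U\,S\,V^{T}, \qquad
\text{forward:}\ \_x[p] \leftarrow U,\ x \leftarrow S\,V^{T}, \qquad
\text{backward:}\ \_x[p] \leftarrow V^{T},\ x \leftarrow U\,S .
\]

This keeps the components behind the sweep front orthogonal, matching the canonicalization that the stacks below rely on.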
// ASD_solver: a single steepest-descent step on the current site.
REQUIRE(_data.ALS.sites == 1, "ASD only defined for single site alternation at the moment");
// ...
grad(i&0) = _b(i&0) - _A(i/2,j/2) * _x[0](j&0); // residual direction b - A*x
// ...
grad(i&0) = _A(j/2,i/2) * grad(j&0); // non-SPD case: apply A^T to the residual
// ...
_x[0] += alpha * grad;
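For context, a sketch of the update these lines perform. The step size alpha is computed in the elided lines; an exact line search on the quadratic energy would give the value below, though that is an assumption rather than something visible in this listing:

\[
g = b - A x, \qquad \alpha = \frac{\langle g, g\rangle}{\langle g, A g\rangle}, \qquad x \leftarrow x + \alpha\, g,
\]

with g replaced by A^{T}(b - A x) when A is not assumed symmetric positive definite.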
// prepare_x_for_als: freeze leading components whose rank already saturates
// the dimension product, so only genuinely free components get optimized.
const size_t d = x.degree();
// ...
size_t firstOptimizedIndex = 0;
size_t dimensionProd = 1;
while (firstOptimizedIndex + 1 < d) {
	const size_t localDim = x.dimensions[firstOptimizedIndex];
	size_t newDimensionProd = dimensionProd * localDim;
	if (x.rank(firstOptimizedIndex) < newDimensionProd) {
		break; // non-trivial rank: this component must be optimized
	}
	// Absorb the full-rank component into its right neighbour...
	Tensor& curComponent = x.component(firstOptimizedIndex);
	// ...
	curComponent(r1,n1,r2) = curComponent(r1,cr1) * x.get_component(firstOptimizedIndex+1)(cr1,n1,r2);
	x.set_component(firstOptimizedIndex+1, std::move(curComponent));
	// ...and replace it by a fixed tensor that merely merges the left rank
	// index with the physical index (a reshaped identity).
	x.set_component(firstOptimizedIndex, Tensor(
		{dimensionProd, localDim, newDimensionProd},
		[&](const std::vector<size_t> &_idx){
			if (_idx[0]*localDim + _idx[1] == _idx[2]) {
				return 1.0;
			}
			return 0.0;
		}));
	// ...
	x.require_correct_format();
	// ...
	firstOptimizedIndex += 1;
	dimensionProd = newDimensionProd;
}
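The lambda-built component is nothing but an identity matrix reshaped to order three. A minimal standalone sketch (not part of als.cpp; it assumes the xerus umbrella header and the documented Tensor function-constructor, reinterpret_dimensions and Tensor::identity):

#include <cstddef>
#include <iostream>
#include <xerus.h>

int main() {
	const size_t dimensionProd = 2, localDim = 3; // illustrative sizes
	const size_t newDimensionProd = dimensionProd * localDim;
	// Same construction rule as in prepare_x_for_als above.
	xerus::Tensor reshapeId({dimensionProd, localDim, newDimensionProd},
		[&](const std::vector<size_t>& _idx) {
			return _idx[0]*localDim + _idx[1] == _idx[2] ? 1.0 : 0.0;
		});
	// Merging the first two modes recovers the 6x6 identity matrix.
	reshapeId.reinterpret_dimensions({newDimensionProd, newDimensionProd});
	std::cout << xerus::frob_norm(
		reshapeId - xerus::Tensor::identity({newDimensionProd, newDimensionProd}))
		<< std::endl; // prints 0
}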
// Mirror pass from the right end of the train.
size_t firstNotOptimizedIndex = d;
dimensionProd = 1;
while (firstNotOptimizedIndex > firstOptimizedIndex + ALS.sites) {
	const size_t localDim = x.dimensions[firstNotOptimizedIndex-1];
	size_t newDimensionProd = dimensionProd * localDim;
	if (x.rank(firstNotOptimizedIndex-2) < newDimensionProd) {
		break;
	}
	// ...
	Tensor& curComponent = x.component(firstNotOptimizedIndex-1);
	// ...
	curComponent(r1,n1,r2) = x.get_component(firstNotOptimizedIndex-2)(r1,n1,cr1) * curComponent(cr1,r2);
	x.set_component(firstNotOptimizedIndex-2, std::move(curComponent));
	// ...
	x.set_component(firstNotOptimizedIndex-1, Tensor(
		{newDimensionProd, localDim, dimensionProd},
		[&](const std::vector<size_t> &_idx){
			if (_idx[0] == _idx[1]*dimensionProd + _idx[2]) {
				return 1.0;
			}
			return 0.0;
		}));
	// ...
	x.require_correct_format();
	// ...
	firstNotOptimizedIndex -= 1;
	dimensionProd = newDimensionProd;
}
// Restore the expected core position before the first sweep.
if (canonicalizeAtTheEnd && corePosAtTheEnd < firstOptimizedIndex) {
	x.assume_core_position(firstOptimizedIndex);
}
// ...
if (canonicalizeAtTheEnd && corePosAtTheEnd >= firstNotOptimizedIndex) {
	x.assume_core_position(firstNotOptimizedIndex-1);
}
// ...
x.move_core(firstOptimizedIndex, true);
// ...
optimizedRange = std::pair<size_t, size_t>(firstOptimizedIndex, firstNotOptimizedIndex);
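The rank test in both loops relies on the standard TT-rank bound; a boundary component whose rank saturates the bound has no free parameters left, which is why it can be frozen to the fixed reshape tensor:

\[
r_k \;\le\; \min\Bigl(\prod_{i \le k} n_i,\; \prod_{i > k} n_i\Bigr).
\]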
// localOperatorSlice(_pos): one-site slice of the operator sandwich.
Index cr1, cr2, cr3, cr4, r1, r2, r3, r4, n1, n2, n3;
// ...
// SPD case: slice of <x, A x>.
res(r1,r2,r3, cr1,cr2,cr3) = x.get_component(_pos)(r1, n1, cr1)
	* A->get_component(_pos)(r2, n1, n2, cr2)
	* x.get_component(_pos)(r3, n2, cr3);
// ...
// General case: slice of <x, A^T A x>; note the transposed first factor.
res(r1,r2,r3,r4, cr1,cr2,cr3,cr4) = x.get_component(_pos)(r1, n1, cr1)
	* A->get_component(_pos)(r2, n2, n1, cr2)
	* A->get_component(_pos)(r3, n2, n3, cr3)
	* x.get_component(_pos)(r4, n3, cr4);
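Written out, the SPD slice is the single-site operator sandwich (index names as in the code; physical indices are summed, rank indices stay open):

\[
W^{(p)}_{(r_1 r_2 r_3),(c_1 c_2 c_3)} \;=\; \sum_{n_1, n_2} x^{(p)}_{r_1 n_1 c_1}\, A^{(p)}_{r_2 n_1 n_2 c_2}\, x^{(p)}_{r_3 n_2 c_3},
\]

while the general variant stacks two operator layers and therefore represents a slice of \(\langle x, A^{T}\!A\, x\rangle\).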
// localRhsSlice(_pos): one-site slice of <x, b> (SPD) or <x, A^T b> (general).
Index cr1, cr2, cr3, r1, r2, r3, n1, n2;
// ...
res(r1,r2, cr1,cr2) = b.get_component(_pos)(r1, n1, cr1)
	* x.get_component(_pos)(r2, n1, cr2);
// ...
res(r1,r2,r3, cr1,cr2,cr3) = b.get_component(_pos)(r1, n1, cr1)
	* A->get_component(_pos)(r2, n1, n2, cr2)
	* x.get_component(_pos)(r3, n2, cr3);
// prepare_stacks: seed both caches with the boundary tensors tmpA, tmpB built
// in the elided lines (presumably via Tensor::ones, which appears in this
// file), then contract slices inwards up to the optimized range.
const size_t d = x.degree();
// ...
localOperatorCache.left.emplace_back(tmpA);
localOperatorCache.right.emplace_back(tmpA);
rhsCache.left.emplace_back(tmpB);
rhsCache.right.emplace_back(tmpB);
// ...
for (size_t i = d-1; i > optimizedRange.first + ALS.sites - 1; --i) {
	// ...
	tmpA(r1&0) = localOperatorCache.right.back()(r2&0) * localOperatorSlice(i)(r1/2, r2/2);
	localOperatorCache.right.emplace_back(tmpA);
	// ...
	tmpB(r1&0) = rhsCache.right.back()(r2&0) * localRhsSlice(i)(r1/2, r2/2);
	rhsCache.right.emplace_back(tmpB);
}
for (size_t i = 0; i < optimizedRange.first; ++i) {
	// ...
	tmpA(r2&0) = localOperatorCache.left.back()(r1&0) * localOperatorSlice(i)(r1/2, r2/2);
	localOperatorCache.left.emplace_back(tmpA);
	// ...
	tmpB(r2&0) = rhsCache.left.back()(r1&0) * localRhsSlice(i)(r1/2, r2/2);
	rhsCache.left.emplace_back(tmpB);
}
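The caches thus obey a one-step recurrence, with \(L_0\) and \(R_d\) the trivial boundary tensors:

\[
L_{i+1} = L_i\, W^{(i)}, \qquad R_i = W^{(i)}\, R_{i+1},
\]

and analogously for the right-hand-side stacks. This is what keeps move_to_next_index cheap: advancing the optimized window essentially pushes one new entry onto one stack and pops one from the other.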
// choose_energy_functional, operator given: the true relative residual.
residual_f = [&](){
	// ...
	return frob_norm((*A)(n1/2,n2/2)*x(n2&0) - b(n1&0))/normB;
};
// ...
energy_f = residual_f;
// SPD case: evaluate the energy 0.5*<x,Ax> - <b,x> cheaply from the caches
// (xAx and bx start from the left caches in the elided lines).
energy_f = [&](){
	// ...
	for (size_t i = 0; i < ALS.sites; ++i) {
		xAx(r2&0) = xAx(r1&0) * localOperatorSlice(currIndex+i)(r1/2, r2/2);
		bx(r2&0) = bx(r1&0) * localRhsSlice(currIndex+i)(r1/2, r2/2);
	}
	res() = 0.5*xAx(r1&0) * localOperatorCache.right.back()(r1&0)
		- bx(r1&0) * rhsCache.right.back()(r1&0);
	return res[0];
};
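The quantity assembled here is the usual quadratic energy for an SPD operator; its minimizer is exactly the solution of \(A x = b\), which makes it a sound progress measure:

\[
E(x) \;=\; \tfrac12\,\langle x, A x\rangle - \langle b, x\rangle, \qquad \nabla E(x) = A x - b .
\]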
// General case: residual from the caches via
// ||Ax - b||^2 = <x,AtAx> - 2<b,Ax> + ||b||^2.
residual_f = [&](){
	// ...
	for (size_t i = 0; i < ALS.sites; ++i) {
		xAtAx(r2&0) = xAtAx(r1&0) * localOperatorSlice(currIndex+i)(r1/2, r2/2);
		bAx(r2&0) = bAx(r1&0) * localRhsSlice(currIndex+i)(r1/2, r2/2);
	}
	res() = xAtAx(r1&0) * localOperatorCache.right.back()(r1&0)
		- 2 * bAx(r1&0) * rhsCache.right.back()(r1&0);
	return std::sqrt(res[0] + misc::sqr(normB))/normB;
};
energy_f = residual_f;
energy_f = residual_f; // (in the branch that reuses the residual as energy)
// ...
// No operator given: energy 0.5*<x,x> - <b,x> for the approximation ||x - b||.
energy_f = [&](){
	// ...
	for (size_t i = 0; i < ALS.sites; ++i) {
		bx(r2&0) = bx(r1&0) * localRhsSlice(currIndex+i)(r1/2, r2/2);
	}
	res() = 0.5*x.get_component(currIndex)(r1&0) * x.get_component(currIndex)(r1&0)
		- bx(r1&0) * rhsCache.right.back()(r1&0);
	return res[0];
};
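Using only the component at currIndex for \(\langle x, x\rangle\) is valid because the train is kept canonicalized with the core at the optimized position, so all other components are orthogonal and drop out of the inner product:

\[
\langle x, x\rangle \;=\; \langle C_{\mathrm{curr}}, C_{\mathrm{curr}}\rangle .
\]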
ALSAlgorithmicData::ALSAlgorithmicData(const ALSVariant& _ALS, const TTOperator* _A, TTTensor& _x, const TTTensor& _b)
	: ALS(_ALS), A(_A), x(_x), b(_b)
	, targetRank(_x.ranks())
	// ...
	, canonicalizeAtTheEnd(_x.canonicalized)
	, corePosAtTheEnd(_x.corePosition)
	// ...
// construct_local_operator: contract the left stack, the ALS.sites operator
// components, and the right stack into the local operator ATilde.
Index cr1, cr2, cr3, cr4, r1, r2, r3, r4, n1, n2, n3, n4, x;
// ...
// SPD case: one operator layer per site.
for (size_t p = 0; p < sites; ++p) {
	ATilde(n1^(p+1), n2, r2, n3^(p+1), n4) = ATilde(n1^(p+1), r1, n3^(p+1))
		* _data.A->get_component(_data.currIndex+p)(r1, n2, n4, r2);
}
ATilde(n1^(sites+1), n2, n3^(sites+1), n4) = ATilde(n1^(sites+1), r1, n3^(sites+1))
	* _data.localOperatorCache.right.back()(n2, r1, n4);
// ...
// General case: two operator layers (A^T A) per site, doubled rank indices.
for (size_t p = 0; p < sites; ++p) {
	ATilde(n1^(p+1),n2, r3,r4, n3^(p+1),n4) = ATilde(n1^(p+1), r1,r2, n3^(p+1))
		/* ... (two A components contracted here in the elided lines) ... */;
}
ATilde(n1^(sites+1), n2, n3^(sites+1), n4) = ATilde(n1^(sites+1), r1^2, n3^(sites+1))
	* _data.localOperatorCache.right.back()(n2, r1^2, n4);
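Schematically (SPD case; the general case carries the doubled rank indices r1^2 visible above), the finished local operator is the left stack, the sites operator components and the right stack contracted over all rank indices:

\[
\tilde A \;=\; \mathcal L \cdot A^{(i)} \cdots A^{(i+s-1)} \cdot \mathcal R .
\]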
// construct_local_RHS: contract the rhs stacks with the local components of
// b (and, in the general case, A) into the local right-hand side BTilde.
Index cr1, cr2, cr3, cr4, r1, r2, r3, r4, n1, n2, n3, n4, x;
// ...
// SPD case: one b component per site (elided), closed with the right stack.
for (size_t p = 0; p < sites; ++p) {
	// ...
}
BTilde(n1^(sites+1),n2) = BTilde(n1^(sites+1), r1) * _data.rhsCache.right.back()(r1,n2);
// ...
// General case: b and A components per site, doubled rank indices.
for (size_t p = 0; p < sites; ++p) {
	BTilde(n1^(p+1), n3, cr1, cr2) = BTilde(n1^(p+1), r1, r2)
		/* ... (b and A components contracted here in the elided lines) ... */;
}
BTilde(n1^(sites+1),n2) = BTilde(n1^(sites+1), r1^2) * _data.rhsCache.right.back()(r1^2,n2);
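Both routines feed the local solver: with every component outside the window frozen, each micro-step solves (or, for ASD, takes a descent step on) the projected problem

\[
\tilde A\, \tilde x \;=\; \tilde b,
\]

where \(\tilde x\) collects the sites components currently being optimized.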
_perfData.add(residual, _data.x, flags);
// ...
_perfData.add(residual, _data.x, 0);
// solve(): input checks and the perfData header.
#ifndef XERUS_DISABLE_RUNTIME_CHECKS
	// ...
	if (_Ap != nullptr) {
		// ...
		for (size_t i = 0; i < _x.dimensions.size(); ++i) {
			// ... (dimension compatibility checks for A, x and b)
		}
	}
	// ...
#endif

if (_Ap != nullptr) {
	_perfData << "ALS for ||A*x - b||^2, x.dimensions: " << _x.dimensions << '\n'
		<< "A.ranks: " << _Ap->ranks() << '\n'
		<< "x.ranks: " << _x.ranks() << '\n'
		<< "b.ranks: " << _b.ranks() << '\n'
		<< "maximum number of half sweeps: " << _numHalfSweeps << '\n'
		<< "convergence epsilon: " << _convergenceEpsilon << '\n';
} else {
	_perfData << "ALS for ||x - b||^2, x.dimensions: " << _x.dimensions << '\n'
		<< "x.ranks: " << _x.ranks() << '\n'
		<< "b.ranks: " << _b.ranks() << '\n'
		<< "maximum number of half sweeps: " << _numHalfSweeps << '\n'
		<< "convergence epsilon: " << _convergenceEpsilon << '\n';
}
if (_Ap != nullptr) {
	std::vector<Tensor> tmpX;
	for (size_t p = 0; p < sites; ++p) {
		// ...
	}
	// ...
	for (size_t p = 0; p < sites; ++p) {
		// ...
	}
	// ...
}
// ...
REQUIRE(sites == 1, "approximation dmrg not implemented yet");
Header file for some additional math functions.
Helper class to allow an intuitive syntax for SVD factorisations.
TensorNetwork construct_local_operator(ALSAlgorithmicData &_data) const
value_t lastEnergy2
energy at the end of last full sweep
static void lapack_solver(const TensorNetwork &_A, std::vector< Tensor > &_x, const TensorNetwork &_b, const ALSAlgorithmicData &_data)
local solver that calls the corresponding lapack routines (LU solver)
const ALSVariant ASD
default variant of the alternating steepest descent for non-symmetric operators
value_t frob_norm() const
Calculates the Frobenius norm of the tensor.
const ALSVariant ASD_SPD
default variant of the alternating steepest descent for symmetric positive-definite operators ...
Header file for the Index class.
Header file for the IndexedTensorMoveable class.
Header file defining lists of indexed tensors.
value_t energy
current value of the energy residual
Tensor & component(const size_t _idx)
Complete access to a specific component of the TT decomposition.
Very general class used to represent arbitrary tensor networks.
std::function< value_t()> energy_f
the energy functional used for this calculation
TensorNetwork localOperatorSlice(size_t _pos)
ContractedTNCache rhsCache
stacks for the right-hand-side (either xb or xAtb)
const ALSVariant ALS
default variant of the single-site ALS algorithm for non-symmetric operators using the lapack solver ...
DimensionTuple dimensions
Vector containing the individual dimensions of the tensor.
Header file for the classes defining factorisations of Tensors.
const size_t FLAG_FINISHED_HALFSWEEP
Specialized TensorNetwork class used to represent TTTensor and TToperators.
size_t halfSweepCount
current count of halfSweeps
The main namespace of xerus.
Class that handles simple (non-decomposed) tensors in a dense or sparse representation.
Header file for the ALS algorithm and its variants.
size_t corePosAtTheEnd
core position that should be restored at the end of the algorithm
void prepare_stacks()
prepares the initial stacks for the local operator and local right-hand-side
void move_to_next_index()
performs one step in direction, updating all stacks
void prepare_x_for_als()
Finds the range of nodes that need to be optimized and orthogonalizes _x properly.
std::pair< size_t, size_t > optimizedRange
range of indices for the nodes of _x that need to be optimized
uint sites
the number of sites that are simultaneously optimized
static Tensor XERUS_warn_unused ones(DimensionTuple _dimensions)
Returns a Tensor with all entries equal to one.
std::function< value_t()> residual_f
the functional to calculate the current residual
void reinterpret_dimensions(DimensionTuple _newDimensions)
Reinterprets the dimensions of the tensor.
Wrapper class for all ALS variants (dmrg etc.)
void solve(internal::IndexedTensorWritable< Tensor > &&_x, internal::IndexedTensorReadOnly< Tensor > &&_A, internal::IndexedTensorReadOnly< Tensor > &&_b)
const TTTensor & b
global right-hand-side
size_t degree() const
Gets the degree of the TensorNetwork.
bool preserveCorePosition
if true the core will be moved to its original position at the end
TensorNetwork construct_local_RHS(ALSAlgorithmicData &_data) const
void move_core(const size_t _position, const bool _keepRank=false)
Move the core to a new position.
const TTOperator * A
global operator A
std::vector< size_t > ranks() const
Gets the ranks of the TTNetwork.
bool assumeSPD
if true the operator A will be assumed to be symmetric positive definite
size_t currIndex
position that is currently being optimized
double solve(const TTOperator *_Ap, TTTensor &_x, const TTTensor &_b, size_t _numHalfSweeps, value_t _convergenceEpsilon, PerformanceData &_perfData=NoPerfData) const
std::vector< size_t > targetRank
rank for the final x
void set_component(const size_t _idx, Tensor _T)
Sets a specific component of the TT decomposition.
value_t lastEnergy
energy at the end of last halfsweep
Header file for comfort functions and macros that should not be exported in the library.
void choose_energy_functional()
chooses the fitting energy functional according to settings and whether an operator A was given ...
static XERUS_force_inline value_t frob_norm(const Tensor &_tensor)
Calculates the Frobenius norm of the given tensor.
bool check_for_end_of_sweep(ALSAlgorithmicData &_data, size_t _numHalfSweeps, value_t _convergenceEpsilon, PerformanceData &_perfData) const
bool useResidualForEndCriterion
Calculates the residual to decide whether the ALS converged. Recommended if _perfData is given...
constexpr T sqr(const T &_a) noexcept
Calculates _a*_a.
TTTensor & x
current iterate x
Header file for shorthand notations that are xerus specific but used throughout the library...
double value_t
The type of values to be used by xerus.
TensorNetwork localRhsSlice(size_t _pos)
static void ASD_solver(const TensorNetwork &_A, std::vector< Tensor > &_x, const TensorNetwork &_b, const ALSAlgorithmicData &_data)
const ALSVariant & ALS
the algorithm this data belongs to
virtual void require_correct_format() const override
Tests whether the network resembles that of a TTTensor and checks consistency with the underlying ten...
const ALSVariant DMRG
default variant of the two-site DMRG algorithm for non-symmetric operators using the lapack solver ...
Class used to represent indices that can be used to write tensor calculations in index notation...
bool canonicalizeAtTheEnd
whether _x should be canonicalized at the end
const size_t FLAG_FINISHED_FULLSWEEP
ALSAlgorithmicData(const ALSVariant &, const TTOperator *, TTTensor &, const TTTensor &)
Direction direction
direction of current sweep
std::vector< Tensor > right
const ALSVariant DMRG_SPD
default variant of the two-site DMRG algorithm for symmetric positive-definite operators using the la...
Header file for the TensorNetwork class.
const ALSVariant ALS_SPD
default variant of the single-site ALS algorithm for symmetric positive-definite operators using the ...
std::vector< Tensor > left
const Tensor & get_component(const size_t _idx) const
Read access to a specific component of the TT decomposition.
ContractedTNCache localOperatorCache
stacks for the local operator (either xAx or xAtAx)
std::vector< size_t > dimensions
Dimensions of the external indices, i.e. the dimensions of the tensor represented by the network...