template<bool isOperator>

template<bool isOperator>
	TTNetwork(_tensor, _eps, std::vector<size_t>(_tensor.degree() == 0 ? 0 : _tensor.degree()/N-1, _maxRank)) {}
template<bool isOperator>

template<bool isOperator>
	dimensions = std::move(_dimensions);

	REQUIRE(dimensions.size()%N == 0, "Illegal degree for TTOperator.");
	REQUIRE(!misc::contains(dimensions, size_t(0)), "Zero is not a valid dimension.");
	const size_t numComponents = dimensions.size()/N;
	if (numComponents == 0) {
		nodes.emplace_back(std::make_unique<Tensor>());

	externalLinks.reserve(dimensions.size());
	for (size_t i = 0; i < numComponents; ++i) {
		externalLinks.emplace_back(i+1, 1, dimensions[i], false);

	for (size_t i = 0; i < numComponents; ++i) {
		externalLinks.emplace_back(i+1, 2, dimensions[numComponents+i], false);
	std::vector<TensorNetwork::Link> neighbors;

	neighbors.emplace_back(1, 0, 1, false);

	nodes.emplace_back(std::make_unique<Tensor>(Tensor::ones({1})), std::move(neighbors));

	for (size_t i = 0; i < numComponents; ++i) {
		neighbors.emplace_back(i, i==0 ? 0 : N+1, 1, false);
		neighbors.emplace_back(-1, i, dimensions[i], true);
		if(isOperator) { neighbors.emplace_back(-1, i+numComponents, dimensions[numComponents+i], true); }
		neighbors.emplace_back(i+2, 0, 1, false);

		nodes.emplace_back(std::make_unique<Tensor>(Tensor::dirac({1, dimensions[i], 1}, 0)), std::move(neighbors));
		nodes.emplace_back(std::make_unique<Tensor>(Tensor::dirac({1, dimensions[i], dimensions[numComponents+i], 1}, 0)), std::move(neighbors));

	neighbors.emplace_back(numComponents, N+1, 1, false);
	nodes.emplace_back(std::make_unique<Tensor>(Tensor::ones({1})), std::move(neighbors));

	(*nodes[1].tensorObject)[0] = 0;
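A minimal usage sketch of this dimension-based constructor, assuming #include <xerus.h> and the public xerus API (the concrete sizes are arbitrary). Because the first component is explicitly zeroed out above, a TTNetwork built from dimensions alone represents the zero tensor of that shape.

xerus::TTTensor tt({2, 3, 4});        // degree-3 TT tensor, all entries 0.0, all ranks 1
xerus::TTOperator op({2, 3, 2, 3});   // degree-4 TT operator (row dimensions {2, 3}, column dimensions {2, 3})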
111 template<
bool isOperator>
113 REQUIRE(_tensor.
degree()%N==0,
"Number of indicis must be even for TTOperator");
114 REQUIRE(_eps >= 0 && _eps < 1,
"_eps must be positive and smaller than one. " << _eps <<
" was given.");
115 REQUIRE(_maxRanks.size() == num_ranks(),
"We need " << num_ranks() <<
" ranks but " << _maxRanks.size() <<
" where given");
116 REQUIRE(!misc::contains(_maxRanks,
size_t(0)),
"Maximal ranks must be strictly positive. Here: " << _maxRanks);
	const size_t numComponents = degree()/N;

	if (_tensor.degree() == 0) {
		*nodes[0].tensorObject = _tensor;

	std::vector<size_t> shuffle(_tensor.degree());
	for(size_t i = 0; i < numComponents; ++i) {
		shuffle[numComponents + i] = 2*i+1;

	std::vector<size_t> extDimensions;
	extDimensions.reserve(remains.degree()+2);
	extDimensions.emplace_back(1);
	extDimensions.insert(extDimensions.end(), remains.dimensions.begin(), remains.dimensions.end());
	extDimensions.emplace_back(1);

	Tensor singularValues, newNode;
	for(size_t position = numComponents-1; position > 0; --position) {
		calculate_svd(remains, singularValues, newNode, remains, 1+position*N, _maxRanks[position-1], _eps);
		set_component(position, std::move(newNode));

	set_component(0, remains);
	assume_core_position(0);
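A hedged usage sketch of the decomposition constructor above (successive truncated SVDs); the tolerance and dimensions are arbitrary, and xerus::Tensor(tt) denotes the conversion back to a full tensor.

xerus::Tensor A = xerus::Tensor::random({4, 4, 4, 4});
xerus::TTTensor tt(A, 1e-12);                                      // SVD-based TT decomposition
double relErr = xerus::frob_norm(xerus::Tensor(tt) - A) / xerus::frob_norm(A);
// relErr should be of the order of the requested accuracy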
template<bool isOperator>
	REQUIRE(_dimensions.size()%N == 0, "Illegal number of dimensions for ttOperator");
	REQUIRE(!misc::contains(_dimensions, size_t(0)), "Trying to construct a TTTensor with dimension 0 is not possible.");

	if(_dimensions.empty()) {

	const size_t numNodes = _dimensions.size()/N;

	std::vector<size_t> dimensions(isOperator ? 4 : 3, 1);
	for(size_t i = 0; i < numNodes; ++i) {
		dimensions[1] = _dimensions[i];
		dimensions[2] = _dimensions[i+numNodes];
		result.set_component(i, Tensor::ones(dimensions));

	result.canonicalize_left();
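A short usage sketch of the ones factory shown above: every component is an all-ones tensor, so the result is the all-ones tensor with all TT ranks equal to one.

auto allOnes = xerus::TTTensor::ones({2, 3, 4});      // every entry equals 1.0
auto onesOp  = xerus::TTOperator::ones({2, 2, 2, 2});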
template<> template<>
	REQUIRE(_dimensions.size()%N == 0, "Illegal number of dimensions for ttOperator");
	REQUIRE(!misc::contains(_dimensions, size_t(0)), "Trying to construct a TTTensor with dimension 0 is not possible.");

	if(_dimensions.empty()) {

	const size_t numComponents = _dimensions.size()/N;

	std::vector<size_t> constructionVector(4, 1);
	for (size_t i = 0; i < numComponents; ++i) {
		constructionVector[1] = _dimensions[i];
		constructionVector[2] = _dimensions[i+numComponents];
		result.set_component(i, Tensor(constructionVector, [](const std::vector<size_t> &_idx){
			if (_idx[1] == _idx[2]) {

	result.canonicalize_left();
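A hedged usage sketch of the identity specialization above (only available for TTOperator; each component carries a Kronecker delta in its two external modes).

auto I = xerus::TTOperator::identity({2, 3, 2, 3});   // identity operator on a {2, 3} index space
// Applying I to any TTTensor with dimensions {2, 3} reproduces that tensor.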
template<bool isOperator>
	REQUIRE(_dimensions.size()%N == 0, "Illegal number of dimensions for ttOperator");
	REQUIRE(!misc::contains(_dimensions, size_t(0)), "Trying to construct a TTNetwork with dimension 0 is not possible.");

	if(_dimensions.empty()) {

	const size_t numNodes = _dimensions.size()/N;

	const auto minN = misc::min(_dimensions);

	std::vector<size_t> dimensions;
	for(size_t i = 0; i < numNodes; ++i) {
		dimensions.reserve(4);
		if(i > 0) { dimensions.push_back(minN); }
		dimensions.push_back(_dimensions[i]);
		if (isOperator) { dimensions.push_back(_dimensions[i+numNodes]); }
		if(i+1 < numNodes) { dimensions.push_back(minN); }
		auto newComp = Tensor::kronecker(dimensions);
		if(i == 0) { dimensions.insert(dimensions.begin(), 1); }
		if(i+1 == numNodes) { dimensions.insert(dimensions.end(), 1); }
		if(i == 0 || i+1 == numNodes) { newComp.reinterpret_dimensions(std::move(dimensions)); }
		result.set_component(i, std::move(newComp));

	result.canonicalize_left();
template<bool isOperator>
	REQUIRE(_dimensions.size()%N == 0, "Illegal number of dimensions for ttOperator");
	REQUIRE(!misc::contains(_dimensions, size_t(0)), "Trying to construct a TTTensor with dimension 0 is not possible.");
	REQUIRE(_dimensions.size() == _position.size(), "Inconsistent number of entries in _dimensions and _position.");

	const size_t numComponents = _dimensions.size()/N;

	if(numComponents <= 1) {
		return TTNetwork(Tensor::dirac(_dimensions, _position));

	for (size_t i = 0; i < numComponents; ++i) {

template<bool isOperator>
	return dirac(_dimensions, Tensor::position_to_multiIndex(_position, _dimensions));
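A short usage sketch of the dirac factories above: a rank-one TT tensor with a single unit entry, addressed either by a multi-index or by its flat (row-major) position.

auto e = xerus::TTTensor::dirac({2, 3, 4}, {1, 2, 3});   // entry (1,2,3) is 1.0, all others 0.0
auto f = xerus::TTTensor::dirac({2, 3, 4}, 23);          // same entry, given as flat position (1*3+2)*4+3 = 23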
#ifndef XERUS_DISABLE_RUNTIME_CHECKS
template<bool isOperator>
	require_valid_network();

	const size_t numComponents = degree()/N;
	const size_t numNodes = degree() == 0 ? 1 : degree()/N + 2;
	REQUIRE(nodes.size() == numNodes, "Wrong number of nodes: " << nodes.size() << " expected " << numNodes << ".");
	REQUIRE(!canonicalized || (degree() == 0 && corePosition == 0) || corePosition < numComponents,
		"Invalid corePosition: " << corePosition << " there are only " << numComponents << " components.");
	for (size_t n = 0; n < externalLinks.size(); ++n) {
		REQUIRE(l.other == (n%numComponents)+1,
			"The " << n << "-th external link must point to the " << (n%numComponents) << "-th component (i.e. the "
			<< (n%numComponents)+1 << "-th node) but does point to the " << l.other << "-th node.");
	REQUIRE(nodes.front().degree() == 1, "The left virtual node must have degree 1, but has degree " << nodes.front().degree());
	REQUIRE(nodes.front().neighbors[0].dimension == 1, "The left virtual node's single dimension must be 1, but is " << nodes.front().neighbors[0].dimension);
	REQUIRE(nodes.front().neighbors[0].other == 1, "The left virtual node's single link must be to node 1, but is towards node " << nodes.front().neighbors[0].other);
	REQUIRE(nodes.front().neighbors[0].indexPosition == 0, "The left virtual node's single link must link at indexPosition 0, but links at " << nodes.front().neighbors[0].indexPosition);
	REQUIRE(misc::hard_equal((*nodes.front().tensorObject)[0], 1.0), "The left virtual node's single entry must be 1.0, but it is " << (*nodes.front().tensorObject)[0]);
	REQUIRE(!nodes.front().tensorObject->has_factor(), "The left virtual node must not carry a non-trivial factor.");

	REQUIRE(nodes.back().degree() == 1, "The right virtual node must have degree 1, but has degree " << nodes.back().degree());
	REQUIRE(nodes.back().neighbors[0].dimension == 1, "The right virtual node's single dimension must be 1, but is " << nodes.back().neighbors[0].dimension);
	REQUIRE(nodes.back().neighbors[0].other == numNodes-2, "The right virtual node's single link must be to node " << numNodes-2 << ", but is towards node " << nodes.back().neighbors[0].other);
	REQUIRE(nodes.back().neighbors[0].indexPosition == N+1, "The right virtual node's single link must link at indexPosition " << N+1 << ", but links at " << nodes.back().neighbors[0].indexPosition);
	REQUIRE(misc::hard_equal((*nodes.back().tensorObject)[0], 1.0), "The right virtual node's single entry must be 1.0, but it is " << (*nodes.back().tensorObject)[0]);
	REQUIRE(!nodes.back().tensorObject->has_factor(), "The right virtual node must not carry a non-trivial factor.");
	for (size_t n = 0; n < numComponents; ++n) {
		REQUIRE(!canonicalized || n == corePosition || !node.tensorObject->has_factor(),
			"In canonicalized TTNetworks only the core may carry a non-trivial factor. Violated by component " << n
			<< " factor: " << node.tensorObject->factor << " corePosition: " << corePosition);

		REQUIRE(node.degree() == N+2, "Every TT-Component must have degree " << N+2 << ", but component " << n << " has degree " << node.degree());
		REQUIRE(!node.neighbors[0].external, "The first link of each TT-Component must not be external. Violated by component " << n);
		REQUIRE(node.neighbors[0].other == n, "The first link of each TT-Component must link to the previous node. Violated by component " << n << ", which instead links to node " << node.neighbors[0].other << " (expected " << n << ").");
		REQUIRE(node.neighbors[0].indexPosition == (n==0?0:N+1), "The first link of each TT-Component must link to the last index of the previous node. Violated by component " << n << ", which instead links to index " << node.neighbors[0].indexPosition << " (expected " << (n==0?0:N+1) << ").");

		REQUIRE(node.neighbors[1].external, "The second link of each TT-Component must be external. Violated by component " << n << ".");
		REQUIRE(node.neighbors[1].indexPosition == n, "The second link of each TT-Component must link to the external dimension equal to the component position. Violated by component " << n << " which links at " << node.neighbors[1].indexPosition);
		REQUIRE(!isOperator || node.neighbors[2].external, "The third link of each TTO-Component must be external. Violated by component " << n << ".");
		REQUIRE(!isOperator || node.neighbors[2].indexPosition == numComponents+n, "The third link of each TTO-Component must link to the external dimension equal to the component position + numComponents. Violated by component " << n << " which links at " << node.neighbors[2].indexPosition << " (expected " << numComponents+n << ").");

		REQUIRE(!node.neighbors.back().external, "The last link of each TT-Component must not be external. Violated by component " << n);
		REQUIRE(node.neighbors.back().other == n+2, "The last link of each TT-Component must link to the next node. Violated by component " << n << ", which instead links to node " << node.neighbors.back().other << " (expected " << n+2 << ").");
		REQUIRE(node.neighbors.back().indexPosition == 0, "The last link of each TT-Component must link to the first index of the next node. Violated by component " << n << ", which instead links to index " << node.neighbors.back().indexPosition << " (expected 0).");
template<bool isOperator>

template<bool isOperator>
	const size_t numComponents = dimensions.size()/N;
	for (size_t i = 0; i < numComponents; ++i) {

template<bool isOperator>
	return degree() == 0 ? 0 : degree()/N-1;
template<bool isOperator>
	const size_t numComponents = _dimensions.size()/N;
	REQUIRE(_dimensions.size()%N == 0, "invalid number of dimensions for TTOperator");
	REQUIRE(numComponents == _ranks.size()+1, "Invalid number of ranks (" << _ranks.size() << ") or dimensions (" << _dimensions.size() << ") given.");

	for (size_t i = 0; i+1 < numComponents; ++i) {
		currMax *= _dimensions[i];
		if (isOperator) { currMax *= _dimensions[numComponents+i]; }

		if (currMax < _ranks[i]) {

	for (size_t i = 1; i < numComponents; ++i) {
		currMax *= _dimensions[numComponents-i];
		if (isOperator) { currMax *= _dimensions[2*numComponents-i]; }

		if (currMax < _ranks[numComponents-i-1]) {
			_ranks[numComponents-i-1] = currMax;

		currMax = _ranks[numComponents-i-1];
template<bool isOperator>
	if (_dimensions.empty()) { return 1; }
	const size_t numComponents = _dimensions.size()/N;
	REQUIRE(_dimensions.size()%N == 0, "invalid number of dimensions for TTOperator");
	REQUIRE(numComponents == _ranks.size()+1, "Invalid number of ranks (" << _ranks.size() << ") or dimensions (" << _dimensions.size() << ") given.");

	for (size_t i = 0; i < numComponents; ++i) {
		size_t component = i == 0 ? 1 : _ranks[i-1];
		component *= _dimensions[i];
		if (isOperator) { component *= _dimensions[i+numComponents]; }
		if (i < _ranks.size()) { component *= _ranks[i]; }

	for (const auto r : _ranks) {

template<bool isOperator>
	return degrees_of_freedom(dimensions, ranks());
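A worked example of the count implemented above, as a sketch: each component contributes r_{i-1} * n_i * r_i entries and, to my understanding, each internal rank removes r_i^2 gauge degrees of freedom.

// Dimensions {2, 2, 2} with ranks {2, 2}:
//   component entries: 1*2*2 + 2*2*2 + 2*2*1 = 16
//   gauge freedom:     2*2 + 2*2             =  8
//   degrees of freedom = 16 - 8 = 8, matching the 2*2*2 = 8 entries of the full tensor.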
template<bool isOperator>
	REQUIRE(!isOperator, "fix_mode() does not work for TTOperators; if applicable, cast to TensorNetwork first");
	TensorNetwork::fix_mode(_mode, _slatePosition);

template<bool isOperator>
	TensorNetwork::resize_mode(_mode, _newDim, _cutPos);
	if(canonicalized && _newDim != corePosition) {
		const size_t oldCorePosition = corePosition;
		const size_t numComponents = degree()/N;
		move_core(_mode%numComponents);
		move_core(oldCorePosition);
template<bool isOperator>
	for (size_t i = 0; i < degree(); ++i) {
		component(i).use_dense_representation();

template<bool isOperator>
	REQUIRE(_idx == 0 || _idx < degree()/N, "Illegal index " << _idx << " in TTNetwork::component, as there are only " << degree()/N << " components.");
	return *nodes[degree() == 0 ? 0 : _idx+1].tensorObject;

template<bool isOperator>
	REQUIRE(_idx == 0 || _idx < degree()/N, "Illegal index " << _idx << " in TTNetwork::get_component.");
	return *nodes[degree() == 0 ? 0 : _idx+1].tensorObject;
template<bool isOperator>
	REQUIRE(_idx == 0, "Illegal index " << _idx << " in TTNetwork::set_component");
	REQUIRE(_T.degree() == 0, "Component of degree zero TTNetwork must have degree zero. Given: " << _T.degree());
	*nodes[0].tensorObject = std::move(_T);

	REQUIRE(_idx < degree()/N, "Illegal index " << _idx << " in TTNetwork::set_component");
	REQUIRE(_T.degree() == N+2, "Component " << _idx << " must have degree " << N+2 << ". Given: " << _T.degree());

	for (size_t i = 0; i < N+2; ++i) {
		currNode.neighbors[i].dimension = currNode.tensorObject->dimensions[i];
		if (currNode.neighbors[i].external) {
			externalLinks[currNode.neighbors[i].indexPosition].dimension = currNode.tensorObject->dimensions[i];
			dimensions[currNode.neighbors[i].indexPosition] = currNode.tensorObject->dimensions[i];

	canonicalized = canonicalized && (corePosition == _idx);
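A hedged usage sketch of component access: components are tensors of degree N+2 whose first and last modes carry the TT ranks (the ranks below are chosen only for illustration).

xerus::TTTensor tt = xerus::TTTensor::random({4, 4, 4}, {3, 3});
const xerus::Tensor& c1 = tt.get_component(1);            // dimensions {3, 4, 3}
tt.set_component(1, xerus::Tensor::random({3, 4, 3}));    // a replacement should keep matching rank dimensions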
template<bool isOperator>
	REQUIRE(_component.is_sparse(), "Not useful (and not implemented) for dense Tensors.");

	std::vector<std::vector<std::tuple<size_t, size_t, value_t>>> groups(externalDim);

		const size_t r2 = entry.first%_component.dimensions.back();
		const size_t n = (entry.first/_component.dimensions.back())%externalDim;
		const size_t r1 = (entry.first/_component.dimensions.back())/externalDim;
		groups[n].emplace_back(r1, r2, _component.factor*entry.second);
template<bool isOperator>
	require_correct_format();

	const size_t numComponents = degree()/N;
	REQUIRE(_position < numComponents, "Can't split a " << numComponents << " component TTNetwork at position " << _position);

	left.nodes.push_back(nodes[0]);
	for (size_t i = 0; i < _position; ++i) {
		left.nodes.push_back(nodes[i+1]);

	for(size_t i = 0; i < _position; ++i) {
		left.dimensions.push_back(dimensions[i+numComponents]);
		left.externalLinks.push_back(externalLinks[i+numComponents]);

	left.dimensions.push_back(left.nodes.back().neighbors.back().dimension);
	left.externalLinks.emplace_back(_position, _position==0 ? 0 : N+1, left.nodes.back().neighbors.back().dimension, false);
	left.nodes.back().neighbors.back().external = true;
	left.nodes.back().neighbors.back().indexPosition = isOperator ? 2*_position-1 : _position;

	right.dimensions.push_back(nodes[_position+2].neighbors.front().dimension);
	right.externalLinks.emplace_back(_position+2, 0, nodes[_position+2].neighbors.front().dimension, false);

	for(size_t i = _position+1; i < numComponents; ++i) {
		right.nodes.push_back(nodes[i+1]);

	for(size_t i = _position+1; i < numComponents+1; ++i) {
		right.dimensions.push_back(dimensions[i+numComponents]);
		right.externalLinks.push_back(externalLinks[i+numComponents]);

	right.nodes.push_back(nodes.back());

	right.nodes.front().neighbors.front().external = true;
	right.nodes.front().neighbors.front().indexPosition = _position;

		link.other -= _position+2;

		link.other -= _position+2;

	return std::pair<TensorNetwork, TensorNetwork>(std::move(left), std::move(right));
template<bool isOperator>
	const size_t numComponents = degree()/N;
	REQUIRE(_position < numComponents || (_position == 0 && degree() == 0), "Illegal core-position " << _position << " chosen for TTNetwork with " << numComponents << " components");
	require_correct_format();

	for (size_t n = corePosition; n < _position; ++n) {
		transfer_core(n+1, n+2, !_keepRank);

	for (size_t n = corePosition; n > _position; --n) {
		transfer_core(n+1, n, !_keepRank);

	for (size_t n = 0; n < _position; ++n) {
		transfer_core(n+1, n+2, !_keepRank);

	for (size_t n = numComponents; n > _position+1; --n) {
		transfer_core(n, n-1, !_keepRank);

	while (exceeds_maximal_ranks()) {
		for (size_t n = _position; n > 0; --n) {
			transfer_core(n+1, n, !_keepRank);

		for (size_t n = 0; n+1 < numComponents; ++n) {
			transfer_core(n+1, n+2, !_keepRank);

		for (size_t n = numComponents; n > _position+1; --n) {
			transfer_core(n, n-1, !_keepRank);

	canonicalized = true;
	corePosition = _position;
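A hedged usage sketch of core movement: after move_core, every component except the core is orthogonal, so norms and truncations can be evaluated at the core alone.

xerus::TTTensor tt = xerus::TTTensor::random({4, 4, 4, 4}, {3, 3, 3});
tt.move_core(2);           // canonicalize with the core at component 2
tt.canonicalize_left();    // equivalent to move_core(0)
// tt.frob_norm() now equals the Frobenius norm of the core component alone.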
template<bool isOperator>

template<bool isOperator>
	move_core(degree() == 0 ? 0 : degree()/N-1);
template<bool isOperator>
	require_correct_format();
	const size_t numComponents = degree()/N;
	REQUIRE(_eps < 1, "_eps must be smaller than one. " << _eps << " was given.");
	REQUIRE(_maxRanks.size()+1 == numComponents || (_maxRanks.empty() && numComponents == 0), "There must be exactly degree/N-1 maxRanks. Here " << _maxRanks.size() << " instead of " << numComponents-1 << " are given.");
	REQUIRE(!misc::contains(_maxRanks, size_t(0)), "Trying to round a TTTensor to rank 0 is not possible.");

	const bool initialCanonicalization = canonicalized;
	const size_t initialCorePosition = corePosition;

	canonicalize_right();

	for(size_t i = 0; i+1 < numComponents; ++i) {
		round_edge(numComponents-i, numComponents-i-1, _maxRanks[numComponents-i-2], _eps, 0.0);

	assume_core_position(0);

	if(initialCanonicalization) {
		move_core(initialCorePosition);

template<bool isOperator>
	round(std::vector<size_t>(num_ranks(), _maxRank), EPSILON);
template<bool isOperator>
	REQUIRE(_maxRank > 0, "MaxRank must be positive");
	round(size_t(_maxRank));

template<bool isOperator>
	round(std::vector<size_t>(num_ranks(), std::numeric_limits<size_t>::max()), _eps);
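A short usage sketch of the rounding overloads above: truncate either to maximal ranks or to a relative accuracy (the concrete numbers are arbitrary).

xerus::TTTensor tt = xerus::TTTensor::random({8, 8, 8, 8}, {10, 10, 10});
tt.round(5);                  // every TT rank truncated to at most 5
tt.round(1e-8);               // ranks chosen adaptively for relative accuracy 1e-8
tt.round({4, 6, 4}, 1e-10);   // per-edge maximal ranks combined with an accuracy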
template<bool isOperator>
	const size_t numComponents = degree()/N;
	REQUIRE(_taus.size()+1 == numComponents || (_taus.empty() && numComponents == 0), "There must be exactly degree/N-1 taus. Here " << _taus.size() << " instead of " << numComponents-1 << " are given.");
	require_correct_format();

	const bool initialCanonicalization = canonicalized;
	const size_t initialCorePosition = corePosition;

	canonicalize_right();

	for(size_t i = 0; i+1 < numComponents; ++i) {
		round_edge(numComponents-i, numComponents-i-1, std::numeric_limits<size_t>::max(), 0.0, _taus[i]);

	assume_core_position(0);

	if(initialCanonicalization) {
		move_core(initialCorePosition);

template<bool isOperator>
	soft_threshold(std::vector<double>(num_ranks(), _tau), _preventZero);
template<bool isOperator>
	std::vector<size_t> res;
	res.reserve(num_ranks());
	for (size_t n = 1; n+2 < nodes.size(); ++n) {
		res.push_back(nodes[n].neighbors.back().dimension);

template<bool isOperator>
	REQUIRE(_i+1 < degree()/N, "Requested illegal rank " << _i);
	return nodes[_i+1].neighbors.back().dimension;
template<bool isOperator>
	REQUIRE(_pos < degree()/N || (degree() == 0 && _pos == 0), "Invalid core position.");

	canonicalized = true;

template<bool isOperator>

template<bool isOperator>
	std::set<size_t> all;
	for(size_t i = 0; i < nodes.size(); ++i) { all.emplace_hint(all.end(), i); }

	canonicalized = false;

	REQUIRE(nodes.size() > 2, "Invalid TTNetwork");
	const size_t numComponents = nodes.size()-2;

	for(size_t i = 0; i+1 < numComponents; ++i) {
		if(nodes[i+1].degree() == 2) {
			if(corePosition == i+1) { corePosition = i; }
			else if(corePosition != i) { canonicalized = false; }

	if(nodes[numComponents].degree() == 2) {
		if(corePosition == numComponents-1) { corePosition = numComponents-2; }
		else if(corePosition != numComponents-2) { canonicalized = false; }
		contract(numComponents-1, numComponents);

	INTERNAL_CHECK(corePosition < degree() || !canonicalized, "Inconsistent core position after contracting degree-2 components.");
template<bool isOperator>
	require_correct_format();

		return get_component(corePosition).frob_norm();

	return std::sqrt(value_t((*this)(i&0)*(*this)(i&0)));
template<bool isOperator>
	REQUIRE(dimensions == _other.dimensions, "The dimensions in TT sum must coincide. Given " << dimensions << " vs " << _other.dimensions);
	require_correct_format();

	const size_t numComponents = degree()/N;

	const bool initialCanonicalization = canonicalized;
	const size_t initialCorePosition = corePosition;

	if (numComponents <= 1) {

	for(size_t position = 0; position < numComponents; ++position) {
		const Tensor& myComponent = get_component(position);

		std::vector<size_t> nxtDimensions;
		nxtDimensions.emplace_back(position == 0 ? 1 : myComponent.dimensions.front()+otherComponent.dimensions.front());
		nxtDimensions.emplace_back(myComponent.dimensions[1]);
		if (isOperator) { nxtDimensions.emplace_back(myComponent.dimensions[2]); }
		nxtDimensions.emplace_back(position == numComponents-1 ? 1 : myComponent.dimensions.back()+otherComponent.dimensions.back());

		std::unique_ptr<Tensor> newComponent(new Tensor(std::move(nxtDimensions), newRep));

		newComponent->offset_add(myComponent, isOperator ? std::vector<size_t>({0,0,0,0}) : std::vector<size_t>({0,0,0}));

		const size_t leftOffset = position == 0 ? 0 : myComponent.dimensions.front();
		const size_t rightOffset = position == numComponents-1 ? 0 : myComponent.dimensions.back();

		newComponent->offset_add(otherComponent, isOperator ? std::vector<size_t>({leftOffset,0,0,rightOffset}) : std::vector<size_t>({leftOffset,0,rightOffset}));

		set_component(position, std::move(*newComponent));

	if(initialCanonicalization) {
		move_core(initialCorePosition);
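A hedged usage sketch of the TT sum implemented above: the components are stacked block-diagonally, so the ranks of the result are at most the sums of the summands' ranks; a subsequent round() recompresses.

xerus::TTTensor a = xerus::TTTensor::random({4, 4, 4}, {2, 2});
xerus::TTTensor b = xerus::TTTensor::random({4, 4, 4}, {3, 3});
xerus::TTTensor c = a + b;   // ranks grow to at most {5, 5}
c.round(1e-12);              // recompress to the numerically relevant ranks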
template<bool isOperator>

template<bool isOperator>
	REQUIRE(!nodes.empty(), "There must not be a TTNetwork without any node");

		component(corePosition) *= _factor;

		component(0) *= _factor;

template<bool isOperator>
	operator*=(1/_divisor);
	_me.assign_indices();
	_other.assign_indices();

	const TTNetwork* const meTT = dynamic_cast<const TTNetwork*>(_me.tensorObjectReadOnly);

	const TTTensor* const otherTT = dynamic_cast<const TTTensor*>(_other.tensorObjectReadOnly);

	const TTOperator* const otherTTO = dynamic_cast<const TTOperator*>(_other.tensorObjectReadOnly);

	if ((otherTT == nullptr) && (otherTTStack == nullptr) && (otherTTO == nullptr) && (otherTTOStack == nullptr)) {

	bool cannoAtTheEnd = false;
	size_t coreAtTheEnd = 0;
	if (meTT != nullptr) {

	auto midIndexItr = _me.indices.begin();

	while (spanSum < _me.degree() / 2) {
		INTERNAL_CHECK(midIndexItr != _me.indices.end(), "Internal Error.");
		spanSum += midIndexItr->span;

	if (spanSum > _me.degree() / 2) {

	if ((otherTT != nullptr) || (otherTTStack != nullptr)) {
		if (std::equal(_me.indices.begin(), midIndexItr, _other.indices.begin()) || std::equal(midIndexItr, _me.indices.end(), _other.indices.begin())) {
			*_out->tensorObject = *_me.tensorObjectReadOnly;
			TensorNetwork::add_network_to_network(std::move(*_out), std::move(_other));

	auto otherMidIndexItr = _other.indices.begin();

	while (spanSum < _other.degree() / 2) {
		INTERNAL_CHECK(otherMidIndexItr != _other.indices.end(), "Internal Error.");
		spanSum += otherMidIndexItr->span;

	if (spanSum > _other.degree() / 2) {

	if ( std::equal(_me.indices.begin(), midIndexItr, _other.indices.begin())
		|| std::equal(midIndexItr, _me.indices.end(), _other.indices.begin())
		|| std::equal(_me.indices.begin(), midIndexItr, otherMidIndexItr)
		|| std::equal(midIndexItr, _me.indices.end(), otherMidIndexItr))
	{
		*_out->tensorObject = *_me.tensorObjectReadOnly;
		TensorNetwork::add_network_to_network(std::move(*_out), std::move(_other));
template<bool isOperator>

template<bool isOperator>
	_me.assign_indices();
	_other.assign_indices();

	const TTNetwork* otherTT = dynamic_cast<const TTNetwork*>(_other.tensorObjectReadOnly);

	if (!otherTT && !otherTTStack) { return false; }

	if(_me.indices == _other.indices) {
		REQUIRE(_me.tensorObjectReadOnly->dimensions == _other.tensorObjectReadOnly->dimensions, "TT sum requires both operands to share the same dimensions.");
		transposeRHS = false;
	} else if (isOperator) {
		auto myMidIndexItr = _me.indices.begin();
		while (spanSum < _me.degree() / 2) {
			spanSum += myMidIndexItr->span;

		auto otherMidIndexItr = _other.indices.begin();
		while (spanSum < _other.degree() / 2) {
			spanSum += otherMidIndexItr->span;

		if(std::equal(_me.indices.begin(), myMidIndexItr, otherMidIndexItr) && std::equal(myMidIndexItr, _me.indices.end(), _other.indices.begin())) {
			transposeRHS = true;

	std::unique_ptr<TTNetwork<isOperator>> meStorage;

	usedMe = meStorage.get();

	*static_cast<TTNetwork*>(_out->tensorObject) += ttOther;
template<bool isOperator>
	_me.assign_indices(_other.degree());
	_other.assign_indices();
	const size_t numComponents = _other.degree()/N;

	const TTNetwork* const otherTTN = dynamic_cast<const TTNetwork*>(_other.tensorObjectReadOnly);

	if(otherTTN || otherTTStack) {
		if (_me.indices == _other.indices) {
			_me.tensorObject->operator=(*_other.tensorObjectReadOnly);

		bool transposed = false;

		auto midIndexItr = _me.indices.begin();
		while (spanSum < numComponents) {
			INTERNAL_CHECK(midIndexItr != _me.indices.end(), "Internal Error.");
			spanSum += midIndexItr->span;

		if (spanSum == numComponents) {
			auto otherMidIndexItr = _other.indices.begin();
			while (spanSum < numComponents) {
				INTERNAL_CHECK(otherMidIndexItr != _other.indices.end(), "Internal Error.");
				spanSum += otherMidIndexItr->span;

			if (spanSum == numComponents) {
				transposed = (std::equal(_me.indices.begin(), midIndexItr, otherMidIndexItr))
					&& (std::equal(midIndexItr, _me.indices.end(), _other.indices.begin()));

		_me.tensorObject->operator=(*_other.tensorObjectReadOnly);

		require_correct_format();

	if (_other.tensorObjectReadOnly->nodes.size() > 1) {
		LOG_ONCE(warning, "Assigning a general tensor network to a TTOperator is not yet implemented; casting to a full Tensor first.");

		Tensor otherFull(*_other.tensorObjectReadOnly);

		otherReordered(_me.indices) = otherFull(_other.indices);

		*_me.tensorObject = TTNetwork(std::move(otherReordered));
template<bool isOperator>

template<bool isOperator>

template<bool isOperator>
	_network *= _factor;

template<bool isOperator>
	_network *= _factor;

template<bool isOperator>
	_network /= _divisor;
template<bool isOperator>
	for (size_t r1 = 0; r1 < _componentA.dimensions.front(); ++r1) {
		for (size_t s1 = 0; s1 < _componentB.dimensions.front(); ++s1) {
			for (size_t n = 0; n < externalDim; ++n) {
				for (size_t r2 = 0; r2 < _componentA.dimensions.back(); ++r2) {
					const size_t offsetA = (r1*externalDim + n)*_componentA.dimensions.back()+r2;
					const size_t offsetB = (s1*externalDim + n)*_componentB.dimensions.back();
					const size_t offsetResult = (((r1*_componentB.dimensions.front() + s1)*externalDim + n)*_componentA.dimensions.back()+r2)*_componentB.dimensions.back();

	} else if(_componentA.is_dense()) {
		const std::vector<std::vector<std::tuple<size_t, size_t, value_t>>> groupedEntriesB = get_grouped_entries<isOperator>(_componentB);

		for (size_t r1 = 0; r1 < _componentA.dimensions.front(); ++r1) {
			for (size_t n = 0; n < externalDim; ++n) {
				for (size_t r2 = 0; r2 < _componentA.dimensions.back(); ++r2) {
					for(const std::tuple<size_t, size_t, value_t>& entryB : groupedEntriesB[n]) {
						const size_t offsetA = (r1*externalDim + n)*_componentA.dimensions.back()+r2;
						const size_t offsetResult = (((r1*_componentB.dimensions.front() + std::get<0>(entryB))*externalDim + n)*_componentA.dimensions.back()+r2)*_componentB.dimensions.back()+std::get<1>(entryB);
						newCompData[offsetResult] = _componentA[offsetA]*std::get<2>(entryB);

	} else if(_componentB.is_dense()) {
			const size_t r2 = entryA.first%_componentA.dimensions.back();
			const size_t n = (entryA.first/_componentA.dimensions.back())%externalDim;
			const size_t r1 = (entryA.first/_componentA.dimensions.back())/externalDim;

			for (size_t s1 = 0; s1 < _componentB.dimensions.front(); ++s1) {
				const size_t offsetB = (s1*externalDim + n)*_componentB.dimensions.back();
				const size_t offsetResult = (((r1*_componentB.dimensions.front() + s1)*externalDim + n)*_componentA.dimensions.back()+r2)*_componentB.dimensions.back();

		const std::vector<std::vector<std::tuple<size_t, size_t, value_t>>> groupedEntriesB = get_grouped_entries<isOperator>(_componentB);

			const size_t r2 = entryA.first%_componentA.dimensions.back();
			const size_t n = (entryA.first/_componentA.dimensions.back())%externalDim;
			const size_t r1 = (entryA.first/_componentA.dimensions.back())/externalDim;

			for(const std::tuple<size_t, size_t, value_t>& entryB : groupedEntriesB[n]) {
				dataMap.emplace((((r1*_componentB.dimensions.front() + std::get<0>(entryB))*externalDim + n)*_componentA.dimensions.back()+r2)*_componentB.dimensions.back()+std::get<1>(entryB), _componentA.factor*entryA.second*std::get<2>(entryB));
template<bool isOperator>
	static constexpr const size_t N = isOperator ? 2 : 1;

	const size_t numComponents = _A.degree() / N;

	#pragma omp for schedule(static)
	for (size_t i = 0; i < numComponents; ++i) {

		Tensor newComponent(isOperator ?

		perform_component_product<isOperator>(newComponent, componentA, componentB);

		#pragma omp critical
		result.set_component(i, std::move(newComponent));
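A hedged usage sketch of the entrywise (Hadamard) product above: the TT ranks of the result are the products of the factors' ranks, so recompression is usually advisable.

xerus::TTTensor a = xerus::TTTensor::random({4, 4, 4}, {2, 2});
xerus::TTTensor b = xerus::TTTensor::random({4, 4, 4}, {3, 3});
xerus::TTTensor c = entrywise_product(a, b);   // ranks grow to at most {6, 6}
c.round(1e-12);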
template<bool isOperator>
	constexpr size_t N = isOperator ? 2 : 1;

	if (_lhs.degree() == 0) {

	if (_rhs.degree() == 0) {

	const size_t lhsNumComponents = _lhs.degree()/N;
	const size_t rhsNumComponents = _rhs.degree()/N;

	for (size_t i = 1; i < result.nodes.size(); ++i) {
		if (l.indexPosition >= lhsNumComponents) {
			l.indexPosition += rhsNumComponents;

	result.nodes.pop_back();

	for (size_t i = 1; i < _rhs.nodes.size(); ++i) {
		if (l.indexPosition < rhsNumComponents) {
			l.indexPosition += lhsNumComponents;
		} else {
			l.indexPosition += 2*lhsNumComponents;

		l.indexPosition = N+1;

		l.other += lhsNumComponents;

	for (size_t i = 0; i < lhsNumComponents; ++i) {

	for (size_t i = 0; i < rhsNumComponents; ++i) {
		result.externalLinks.emplace_back(lhsNumComponents+i+1, 1, d, false);

	for (size_t i = 0; i < lhsNumComponents; ++i) {

	for (size_t i = 0; i < rhsNumComponents; ++i) {
		result.externalLinks.emplace_back(lhsNumComponents+i+1, 2, d, false);

	if (result.nodes[1].tensorObject->has_factor()) {
		(*result.nodes[lhsNumComponents+1].tensorObject) *= result.nodes[1].tensorObject->factor;
		result.nodes[1].tensorObject->factor = 1.0;

	const size_t lastIdx = lhsNumComponents + rhsNumComponents - 1;

	if (result.nodes[lastIdx+1].tensorObject->has_factor()) {
		(*result.nodes[lhsNumComponents].tensorObject) *= result.nodes[lastIdx+1].tensorObject->factor;
		result.nodes[lastIdx+1].tensorObject->factor = 1.0;
template<bool isOperator>
	for (size_t i = _tensors.size()-1; i > 0; --i) {
template<bool isOperator>
	if(_format == misc::FileFormat::TSV) {
		_stream << std::setprecision(std::numeric_limits<value_t>::digits10 + 1);

	write_to_stream<size_t>(_stream, 1, _format);

	write_to_stream<bool>(_stream, _obj.canonicalized, _format);
	write_to_stream<size_t>(_stream, _obj.corePosition, _format);

	write_to_stream<TensorNetwork>(_stream, _obj, _format);

template<bool isOperator>
	IF_CHECK(size_t ver = ) read_from_stream<size_t>(_stream, _format);
	REQUIRE(ver == 1, "Unknown stream version to open (" << ver << ")");

	read_from_stream<bool>(_stream, _obj.canonicalized, _format);
	read_from_stream<size_t>(_stream, _obj.corePosition, _format);

	read_from_stream<TensorNetwork>(_stream, _obj, _format);

	_obj.require_correct_format();
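A serialization sketch, assuming the xerus::misc::save_to_file and load_from_file helpers, which to my understanding use the stream_writer / stream_reader pair defined above (file name chosen only for illustration).

xerus::TTTensor tt = xerus::TTTensor::random({4, 4, 4}, {2, 2});
xerus::misc::save_to_file(tt, "tt.dat");
auto restored = xerus::misc::load_from_file<xerus::TTTensor>("tt.dat");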