80 std::vector<TensorNetwork::Link> newLinks;
81 for (
size_t d = 0; d <
dimensions.size(); ++d) {
83 newLinks.emplace_back(-1, d,
dimensions[d],
true);
94 for (
size_t id = 0;
id <
nodes.size(); ++id) {
95 if (!_idF(
id)) {
continue; }
97 for (
size_t i = 0; i < cpy.
nodes[id].neighbors.size(); ++i) {
100 if (!_idF(l.
other)) {
110 size_t correction = 0;
111 std::vector<long> toErase;
112 for (
size_t eid = 0; eid < cpy.
externalLinks.size(); ++eid) {
114 if (!_idF(l.
other)) {
115 toErase.emplace_back(
long(eid));
124 for (
size_t i = toErase.size(); i > 0; --i) {
138 std::set<size_t> all;
139 for(
size_t i = 0; i <
nodes.size(); ++i) { all.emplace_hint(all.end(), i); }
143 std::vector<bool> seen(
nodes.size(),
false);
144 std::vector<size_t> expansionStack;
145 expansionStack.reserve(
nodes.size());
149 if(!seen[el.other]) {
150 seen[el.other] =
true;
151 expansionStack.push_back(el.other);
156 while (!expansionStack.empty()) {
157 const size_t curr = expansionStack.back();
158 expansionStack.pop_back();
162 if ( !n.external && !seen[n.other] ) {
163 seen[n.other] =
true;
164 expansionStack.push_back(n.other);
170 std::set<size_t> toContract;
171 for (
size_t i = 0; i <
nodes.size(); ++i) {
173 toContract.emplace_hint(toContract.end(), i);
178 if (!toContract.empty()) {
179 const size_t remaining =
contract(toContract);
184 nodes[remaining].erased =
true;
185 for(
size_t i = 0; i <
nodes.size(); ++i) {
186 if(!
nodes[i].erased) {
187 *
nodes[i].tensorObject *= (*
nodes[remaining].tensorObject)[0];
202 size_t posA=~0ul, posB=~0ul;
206 for(
size_t i = 0; i <
nodes[_nodeA].neighbors.size(); ++i) {
207 if(
nodes[_nodeA].neighbors[i].other == _nodeB) {
209 REQUIRE(!foundCommon,
"TN round/move core does not work if the two nodes share more than one link.");
214 REQUIRE(foundCommon,
"TN round does not work if the two nodes share no link.");
216 posB =
nodes[_nodeA].neighbors[posA].indexPosition;
218 return std::pair<size_t, size_t>(posA, posB);
223 for (
size_t i = 0; i <
nodes[_nodeId].degree(); ++i) {
225 if (link.
links(_nodeId)) {
228 const std::vector<Link> linkCopy(
nodes[_nodeId].neighbors);
231 const Link& otherLink = linkCopy[j];
240 const Link& otherLink = linkCopy[j];
249 nodes[_nodeId].neighbors.erase(
nodes[_nodeId].neighbors.begin() + i);
259 std::vector<size_t> idMap(
nodes.size());
262 size_t newId = 0, oldId = 0;
263 for (; oldId <
nodes.size(); ++oldId) {
264 if (!
nodes[oldId].erased) {
265 idMap[oldId] = newId;
266 if (newId != oldId) { std::swap(
nodes[newId],
nodes[oldId]); }
281 l.other = idMap[l.other];
290 std::set<size_t> all;
291 for(
size_t i = 0; i <
nodes.size(); ++i) { all.emplace_hint(all.end(), i); }
296 std::vector<size_t> shuffle(
degree());
297 for(
size_t i = 0; i < cpy.
nodes[res].neighbors.size(); ++i) {
299 shuffle[i] = cpy.
nodes[res].neighbors[i].indexPosition;
314 REQUIRE(_position == 0,
"Tried to access non-existing entry of TN");
316 for(
const TensorNode& node :
nodes) { value *= (*node.tensorObject)[0]; }
320 std::vector<size_t> positions(
degree());
321 size_t remains = _position;
322 for(
size_t i =
degree(); i > 1; --i) {
326 positions[0] = remains;
340 size_t killedDimensions = 0;
341 for(
size_t i = 0; i < node.
neighbors.size(); ++i) {
352 for(
size_t i = 0; i < node.
neighbors.size(); ++i) {
362 return (*partialCopy.
nodes[0].tensorObject)[0];
369 REQUIRE(!
nodes.empty(),
"There must not be a TTNetwork without any node");
370 REQUIRE(!
nodes[0].erased,
"There must not be an erased node.");
371 *
nodes[0].tensorObject *= _factor;
376 REQUIRE(!
nodes.empty(),
"There must not be a TTNetwork without any node");
377 REQUIRE(!
nodes[0].erased,
"There must not be an erased node.");
378 *
nodes[0].tensorObject /= _divisor;
420 _other.assign_indices();
424 _me.assign_indices();
427 const std::vector<size_t> otherDimensions = other.
dimensions;
430 for (
size_t i = 0, dimPosA = 0; i < _me.indices.size(); dimPosA += _me.indices[i].span, ++i) {
431 size_t j = 0, dimPosB = 0;
432 while (_me.indices[i] != _other.indices[j]) {
433 dimPosB += _other.indices[j].span;
435 REQUIRE( j < _other.indices.size(),
"LHS Index " << _me.indices[i] <<
" not found in RHS " << _other.indices);
438 REQUIRE(_me.indices[i].span == _other.indices[j].span,
"Index spans must coincide");
440 for (
size_t s = 0; s < _me.indices[i].span; ++s) {
441 me.
dimensions[dimPosA+s] = otherDimensions[dimPosB+s];
458 result += node.tensorObject->size;
464 std::vector<TensorNode> newNodes(
nodes.size());
466 for (
size_t i = 0; i <
nodes.size(); ++i) {
467 if (
nodes[i].erased) {
continue; }
468 const size_t newIndex = _f(i);
469 newSize = std::max(newSize, newIndex+1);
470 REQUIRE(newNodes[newIndex].erased,
"Tried to shuffle two nodes to the same new position " << newIndex <<
" i= " << i);
471 newNodes[newIndex] =
nodes[i];
473 if (!l.external) { l.other = _f(l.other); }
477 nodes.resize(newSize);
480 link.other = _f(link.other);
484 #ifndef XERUS_DISABLE_RUNTIME_CHECKS 487 REQUIRE(!
nodes.empty(),
"There must always be at least one node!");
492 REQUIRE(el.
other <
nodes.size(),
"External link " << n <<
" is inconsitent. The linked node " << el.
other <<
" does not exist, as there are only " <<
nodes.size() <<
" nodes.");
493 REQUIRE(el.
dimension > 0,
"External link " << n <<
" is corrupted. The link specifies zero as dimension.");
494 REQUIRE(el.
dimension ==
dimensions[n],
"External link " << n <<
" is inconsitent. The specified dimension " << el.
dimension <<
" does not match the " << n <<
"-th dimension of the Network, which is " <<
dimensions[n] <<
".");
495 REQUIRE(!el.
external,
"External link " << n <<
" is corrupted. It specifies itself to be external, but must link to a node.");
500 REQUIRE(otherLink.
external,
"External link " << n <<
" is inconsitent. The link points to node " << el.
other <<
" at IP " << el.
indexPosition <<
", but the target link says it is not external.");
502 REQUIRE(otherLink.
dimension == el.
dimension,
"External link " << n <<
" is inconsitent. The link points to node " << el.
other <<
" at IP " << el.
indexPosition <<
". The dimension specified by the external link is " << el.
dimension <<
" but the one of the target link is " << otherLink.
dimension <<
".");
506 for (
size_t n = 0; n <
nodes.size(); ++n) {
508 REQUIRE(!_check_erased || !currNode.
erased,
"Node " << n <<
" is marked erased, although this was not allowed.");
510 REQUIRE(currNode.
degree() == currNode.
tensorObject->degree(),
"Node " << n <<
" has is inconsitent, as its tensorObject has degree " << currNode.
tensorObject->degree() <<
" but there are " << currNode.
degree() <<
" links.");
514 for (
size_t i = 0; i < currNode.
neighbors.size(); ++i) {
522 REQUIRE(el.
other <
nodes.size(),
"Inconsitent Link from node " << n <<
" to node " << el.
other <<
" from IP " << i <<
" to IP " << el.
indexPosition <<
". The target node does not exist, as there are only " <<
nodes.size() <<
" nodes.");
524 REQUIRE(other.degree() > el.
indexPosition,
"Inconsitent Link from node " << n <<
" to node " << el.
other <<
" from IP " << i <<
" to IP " << el.
indexPosition <<
". Link at target does not exist as there are only " << other.degree() <<
" links.");
525 REQUIRE(!other.neighbors[el.
indexPosition].external,
"Inconsitent Link from node " << n <<
" to node " << el.
other <<
" from IP " << i <<
" to IP " << el.
indexPosition <<
". Link at target says it is external.");
527 REQUIRE(other.neighbors[el.
indexPosition].indexPosition == i,
"Inconsitent Link from node " << n <<
" to node " << el.
other <<
" from IP " << i <<
" to IP " << el.
indexPosition <<
". Link at target links to node " << other.neighbors[el.
indexPosition].other <<
" at IP " << other.neighbors[el.
indexPosition].indexPosition);
554 _base.assign_indices();
555 _toInsert.assign_indices();
558 const TensorNetwork &toInsert = *_toInsert.tensorObjectReadOnly;
560 const size_t firstNew = base.
nodes.size();
564 _base.indices.insert(_base.indices.end(), _toInsert.indices.begin(), _toInsert.indices.end());
570 for (
const Index &idx : _base.indices) {
571 REQUIRE(misc::count(_base.indices, idx) < 3,
"Index must not appear three (or more) times.");
576 for (
size_t i = firstNewExternal; i < base.
externalLinks.size(); ++i) {
581 for (
size_t i = firstNew; i < base.
nodes.size(); ++i) {
586 l.indexPosition += firstNewExternal;
594 _base.tensorObject->require_valid_network();
600 _base.assign_indices();
604 IF_CHECK( std::set<Index> contractedIndices; )
606 size_t passedDegree = 0;
607 for(
size_t i = 0; i < _base.indices.size(); ) {
608 const Index& idx = _base.indices[i];
612 size_t passedDegreeSecond = passedDegree + idx.
span;
613 for( ; j < _base.indices.size(); passedDegreeSecond += _base.indices[j].span, ++j) {
614 if(idx == _base.indices[j]) {
break; }
617 if(j < _base.indices.size()) {
618 REQUIRE(!misc::contains(contractedIndices, idx),
"Indices must occur at most twice per contraction");
619 REQUIRE(idx.
span == _base.indices[j].span,
"Index spans do not coincide " << idx <<
" vs " << _base.indices[j]);
620 IF_CHECK( contractedIndices.insert(idx); )
622 for (
size_t n = 0; n < idx.
span; ++n) {
625 REQUIRE(link1.
dimension == link2.
dimension,
"Index dimensions do not coincide: ["<<n<<
"] " << link1.
dimension <<
" vs " << link2.
dimension <<
" Indices are " << idx <<
" and " << _base.indices[j] <<
" from " << _base.indices);
638 for(
size_t k = passedDegree; k < passedDegreeSecond-idx.
span; ++k) {
642 for(
size_t k = passedDegreeSecond-idx.
span; k < base.
externalLinks.size(); ++k) {
647 _base.indices.erase(_base.indices.begin()+j);
648 _base.indices.erase(_base.indices.begin()+i);
650 passedDegree += idx.
span;
657 for(
size_t i = 0; i < _base.indices.size(); ) {
658 const Index& idx = _base.indices[i];
662 for(
size_t k = passedDegree; k < passedDegree+idx.
span; ++k) {
667 _base.indices.erase(_base.indices.begin()+i);
669 passedDegree += idx.
span;
678 void TensorNetwork::round_edge(
const size_t _nodeA,
const size_t _nodeB,
const size_t _maxRank,
const double _eps,
const double _softThreshold) {
681 size_t fromPos, toPos;
687 const size_t fromDegree = fromTensor.
degree();
688 const size_t toDegree = toTensor.
degree();
690 const size_t currRank = fromTensor.
dimensions[fromPos];
693 const bool transFrom = (fromPos == 0);
694 const bool reshuffleFrom = (!transFrom && fromPos != fromDegree-1);
695 std::vector<size_t> forwardShuffleFrom;
696 std::vector<size_t> backwardShuffleFrom;
698 forwardShuffleFrom.resize(fromDegree);
699 backwardShuffleFrom.resize(fromDegree);
701 for(
size_t i = 0; i < fromPos; ++i) {
702 forwardShuffleFrom[i] = i;
703 backwardShuffleFrom[i] = i;
706 for(
size_t i = fromPos; i+1 < fromDegree; ++i) {
707 forwardShuffleFrom[i+1] = i;
708 backwardShuffleFrom[i] = i+1;
711 forwardShuffleFrom[fromPos] = fromDegree-1;
712 backwardShuffleFrom[fromDegree-1] = fromPos;
714 reshuffle(fromTensor, fromTensor, forwardShuffleFrom);
718 const bool transTo = (toPos == toDegree-1);
719 const bool reshuffleTo = (!transTo && toPos != 0);
720 std::vector<size_t> forwardShuffleTo;
721 std::vector<size_t> backwardShuffleTo;
723 forwardShuffleTo.resize(toDegree);
724 backwardShuffleTo.resize(toDegree);
726 for(
size_t i = 0; i < toPos; ++i) {
727 forwardShuffleTo[i] = i+1;
728 backwardShuffleTo[i+1] = i;
731 for(
size_t i = toPos+1; i < toDegree; ++i) {
732 forwardShuffleTo[i] = i;
733 backwardShuffleTo[i] = i;
736 forwardShuffleTo[toPos] = 0;
737 backwardShuffleTo[0] = toPos;
739 reshuffle(toTensor, toTensor, forwardShuffleTo);
751 calculate_qc(fromTensor, coreA, fromTensor, fromDegree-1);
786 calculate_svd(fromTensor, S, toTensor, X, fromDegree-1, _maxRank, _eps);
797 backwardShuffleFrom.resize(fromDegree);
798 for(
size_t i = 0; i+1 < fromDegree; ++i) {
799 backwardShuffleFrom[i] = i+1;
801 backwardShuffleFrom[fromDegree-1] = 0;
802 reshuffle(fromTensor, fromTensor, backwardShuffleFrom);
808 reshuffle(fromTensor, fromTensor, backwardShuffleFrom);
812 reshuffle(toTensor, toTensor, backwardShuffleTo);
816 nodes[_nodeA].neighbors[fromPos].dimension =
nodes[_nodeA].tensorObject->dimensions[fromPos];
817 nodes[_nodeB].neighbors[toPos].dimension =
nodes[_nodeB].tensorObject->dimensions[toPos];
822 REQUIRE(_from <
nodes.size() && _to <
nodes.size(),
" Illegal node IDs " << _from <<
"/" << _to <<
" as there are only " <<
nodes.size() <<
" nodes");
834 if(_allowRankReduction) {
842 if(_allowRankReduction) {
849 std::vector<size_t> forwardShuffle(
nodes[_from].
degree());
850 std::vector<size_t> backwardShuffle(
nodes[_from].
degree());
852 for(
size_t i = 0; i < posA; ++i) {
853 forwardShuffle[i] = i;
854 backwardShuffle[i] = i;
857 for(
size_t i = posA; i+1 <
nodes[_from].degree(); ++i) {
858 forwardShuffle[i+1] = i;
859 backwardShuffle[i] = i+1;
862 forwardShuffle[posA] =
nodes[_from].degree()-1;
863 backwardShuffle[
nodes[_from].degree()-1] = posA;
865 reshuffle(fromTensor, fromTensor, forwardShuffle);
867 if(_allowRankReduction) {
873 reshuffle(fromTensor, Q, backwardShuffle);
883 std::vector<size_t> forwardShuffle(
nodes[_to].
degree());
884 std::vector<size_t> backwardShuffle(
nodes[_to].
degree());
886 for(
size_t i = 0; i < posB; ++i) {
887 forwardShuffle[i] = i+1;
888 backwardShuffle[i+1] = i;
891 for(
size_t i = posB+1; i <
nodes[_to].degree(); ++i) {
892 forwardShuffle[i] = i;
893 backwardShuffle[i] = i;
896 forwardShuffle[posB] = 0;
897 backwardShuffle[0] = posB;
899 reshuffle(toTensor, toTensor, forwardShuffle);
903 reshuffle(toTensor, toTensor, backwardShuffle);
916 REQUIRE(_slatePosition <
dimensions[_mode],
"Invalide _slatePosition to choose");
919 const size_t extNodeIndexPos =
externalLinks[_mode].indexPosition;
922 for(
size_t i = _mode+1; i <
dimensions.size(); ++i) {
931 for(
size_t i = extNodeIndexPos+1; i <
nodes[extNode].neighbors.size(); ++i) {
932 const Link& link =
nodes[extNode].neighbors[i];
945 nodes[extNode].tensorObject->fix_mode(extNodeIndexPos, _slatePosition);
946 nodes[extNode].neighbors.erase(
nodes[extNode].neighbors.begin() + extNodeIndexPos);
957 REQUIRE(_mode <
degree(),
"invalid dimension to remove a slate from");
958 REQUIRE(_slatePosition <
dimensions[_mode],
"invalide slate position to choose");
959 REQUIRE(
dimensions[_mode] > 0,
"removing the last possible slate from this index position would result a dimension of size 0");
962 const size_t extNodeIndexPos =
externalLinks[_mode].indexPosition;
966 if (
nodes[extNode].tensorObject) {
967 nodes[extNode].tensorObject->remove_slate(extNodeIndexPos, _slatePosition);
969 nodes[extNode].neighbors[extNodeIndexPos].dimension -= 1;
975 REQUIRE(_mode <
degree(),
"Invalid dimension given for resize_mode");
979 const size_t extNodeIndexPos =
externalLinks[_mode].indexPosition;
981 nodes[extNode].tensorObject->resize_mode(extNodeIndexPos, _newDim, _cutPos);
982 nodes[extNode].neighbors[extNodeIndexPos].dimension = _newDim;
994 std::vector<std::set<size_t>> contractions(strippedNet.
nodes.size());
995 for (
size_t id1=0; id1 < strippedNet.
nodes.size(); ++id1) {
1008 if (r*r >= currNode.
size() || r*r >= strippedNet.
nodes[l.
other].size()) {
1009 if (contractions[id1].empty()) {
1010 contractions[id1].insert(id1);
1012 if (contractions[l.
other].empty()) {
1013 contractions[id1].insert(l.
other);
1015 contractions[id1].insert(contractions[l.
other].begin(), contractions[l.
other].end());
1016 contractions[l.
other].clear();
1026 for (std::set<size_t> &ids : contractions) {
1027 if (ids.size() > 1) {
1041 REQUIRE(!node1.
erased,
"It appears node1 = " << _nodeId1 <<
" was already contracted?");
1042 REQUIRE(!node2.
erased,
"It appears node2 = " << _nodeId2 <<
" was already contracted?");
1045 std::vector<TensorNetwork::Link> newLinks;
1053 if (!l.
links(_nodeId1) && !l.
links(_nodeId2)) {
1054 newLinks.emplace_back(l);
1059 if (!l.
links(_nodeId2) && !l.
links(_nodeId1)) {
1060 newLinks.emplace_back(l);
1066 size_t contractedDimCount = 0;
1076 uint_fast8_t switches = 0;
1077 bool previous = node1.
neighbors[0].links(_nodeId2);
1079 if (l.
links(_nodeId2)) {
1080 contractedDimCount++;
1086 newLinks.emplace_back(l);
1093 separated1 = (switches < 2);
1096 if(node1.
neighbors[0].links(_nodeId2)) {
1097 contractedDimCount = 1;
1099 newLinks.emplace_back(node1.
neighbors[0]);
1110 if(node2.
degree() > 1 && contractedDimCount > 0) {
1111 bool previous = node2.
neighbors[0].links(_nodeId1);
1112 uint_fast8_t switches = 0;
1113 size_t lastPosOfCommon = 0;
1114 matchingOrder =
true;
1116 if (l.
links(_nodeId1)) {
1118 matchingOrder =
false;
1126 newLinks.emplace_back(l);
1133 separated2 = (switches < 2);
1135 if(contractedDimCount == 0) {
1140 matchingOrder =
true;
1145 if (!matchingOrder && separated1 && separated2) {
1155 std::vector<size_t> shuffle(node1.
degree());
1158 for (
size_t d = 0; d < node1.
degree(); ++d) {
1159 if (!node1.
neighbors[d].links(_nodeId2)) {
1165 if (l.
links(_nodeId1)) {
1173 matchingOrder =
true;
1178 std::vector<size_t> shuffle(node2.
degree());
1181 if (matchingOrder) {
1183 for (
size_t d = 0; d < node2.
degree(); ++d) {
1184 if (node2.
neighbors[d].links(_nodeId1)) {
1190 if (l.
links(_nodeId2)) {
1196 for (
size_t d = 0; d < node2.
degree(); ++d) {
1197 if (!node2.
neighbors[d].links(_nodeId1)) {
1206 const bool trans1 = separated1 && !node1.
neighbors.empty() && node1.
neighbors[0].links(_nodeId2);
1207 const bool trans2 = separated2 &&!node2.
neighbors.empty() &&!(node2.
neighbors[0].links(_nodeId1));
1213 nodes[_nodeId1].neighbors = std::move(newLinks);
1214 nodes[_nodeId2].erase();
1217 for (
size_t d = 0; d <
nodes[_nodeId1].neighbors.size(); ++d) {
1218 const Link& l =
nodes[_nodeId1].neighbors[d];
1233 REQUIRE(!
nodes[_nodeId1].erased,
"It appears node1 = " << _nodeId1 <<
" was already contracted?");
1234 REQUIRE(!
nodes[_nodeId2].erased,
"It appears node2 = " << _nodeId2 <<
" was already contracted?");
1236 if (_nodeId1 == _nodeId2) {
1237 return static_cast<double>(
nodes[_nodeId1].size());
1243 size_t cost =
nodes[_nodeId1].size();
1244 for(
const Link& neighbor :
nodes[_nodeId2].neighbors) {
1245 if(!neighbor.links(_nodeId1)) {
1246 cost *= neighbor.dimension;
1249 return static_cast<double>(cost);
1255 for (
const size_t id : _ids ) {
1259 if (_ids.empty()) {
return ~0ul; }
1261 if (_ids.size() == 1) {
return *_ids.begin(); }
1263 if (_ids.size() == 2) {
1264 auto secItr = _ids.begin(); ++secItr;
1266 return *_ids.begin();
1269 if (_ids.size() == 3) {
1270 auto idItr = _ids.begin();
1274 double sa = 1, sb = 1, sc = 1;
1275 double sab = 1, sbc = 1, sac = 1;
1276 for (
size_t d = 0; d < na.
degree(); ++d) {
1278 sab *=
static_cast<double>(na.
neighbors[d].dimension);
1280 sac *=
static_cast<double>(na.
neighbors[d].dimension);
1282 sa *=
static_cast<double>(na.
neighbors[d].dimension);
1285 for (
size_t d = 0; d < nb.
degree(); ++d) {
1287 sbc *=
static_cast<double>(nb.
neighbors[d].dimension);
1289 sb *=
static_cast<double>(nb.
neighbors[d].dimension);
1292 for (
size_t d = 0; d < nc.
degree(); ++d) {
1294 sc *=
static_cast<double>(nc.
neighbors[d].dimension);
1298 double costAB = sa*sb*sac*sbc*(sab+sc);
1299 double costAC = sa*sc*sab*sbc*(sac+sb);
1300 double costBC = sb*sc*sab*sac*(sbc+sa);
1302 if (costAB < costAC && costAB < costBC) {
1303 LOG(TNContract,
"contraction of ab first " << sa <<
" " << sb <<
" " << sc <<
" " << sab <<
" " << sbc <<
" " << sac);
1305 }
else if (costAC < costBC) {
1306 LOG(TNContract,
"contraction of ac first " << sa <<
" " << sb <<
" " << sc <<
" " << sab <<
" " << sbc <<
" " << sac);
1309 LOG(TNContract,
"contraction of bc first " << sa <<
" " << sb <<
" " << sc <<
" " << sab <<
" " << sbc <<
" " << sac);
1317 double bestCost = std::numeric_limits<double>::max();
1318 std::vector<std::pair<size_t, size_t>> bestOrder;
1322 c(bestCost, bestOrder, strippedNetwork);
1325 INTERNAL_CHECK(bestCost < std::numeric_limits<double>::max() && !bestOrder.empty(),
"Internal Error.");
1327 for (
const std::pair<size_t,size_t> &c : bestOrder) {
1332 return bestOrder.back().first;
1339 res() = (*this)(i&0) * (*
this)(i&0);
1340 return std::sqrt(res[0]);
1345 std::stringstream graphLayout;
1347 graphLayout <<
"graph G {" << std::endl;
1348 graphLayout <<
"graph [mclimit=1000, maxiter=1000, overlap = false, splines = true]" << std::endl;
1350 for(
size_t i = 0; i <
nodes.size(); ++i) {
1352 if(
nodes[i].erased) {
1353 graphLayout <<
"\tN"<<i<<
" [label=\"N"<<i<<
"\", shape=circle, fixedsize=shape, height=0.45];" << std::endl;
1355 graphLayout <<
"\tN"<<i<<
" [label=\"";
1356 for(
size_t k=0; k+1 <
nodes[i].degree(); ++k) {
1359 graphLayout <<
"<i"<<k<<
"> "<<i<<
"| ";
1361 graphLayout <<
"<i"<<k<<
"> N"<<i<<
"| ";
1364 graphLayout <<
"<i"<<k<<
"> N| ";
1366 graphLayout <<
"<i"<<k<<
"> | ";
1370 graphLayout <<
"<i"<<
nodes[i].degree()-1<<
"> N"<<i<<
"\", shape=record, fixedsize=shape, height=0.45, style=\"rounded,filled\"];" << std::endl;
1372 graphLayout <<
"<i"<<
nodes[i].degree()-1<<
">\", shape=record, fixedsize=shape, height=0.45, style=\"rounded,filled\"];" << std::endl;
1376 for(
size_t j = 0; j <
nodes[i].neighbors.size(); ++j) {
1377 if(
nodes[i].neighbors[j].external) {
1378 graphLayout <<
"\t"<<
nodes[i].neighbors[j].indexPosition<<
" [shape=diamond, fixedsize=shape, height=0.38, width=0.38, style=filled];" << std::endl;
1379 graphLayout <<
"\tN"<<i<<
":i"<<j<<
" -- " <<
nodes[i].neighbors[j].indexPosition <<
" [len=1, label=\""<<
nodes[i].neighbors[j].dimension<<
"\"];" << std::endl;
1380 }
else if(
nodes[i].neighbors[j].other < i) {
1381 graphLayout <<
"\tN"<<i<<
":i"<<j<<
" -- " <<
"N"<<
nodes[i].neighbors[j].other <<
":i"<<
nodes[i].neighbors[j].indexPosition<<
" [label=\""<<
nodes[i].neighbors[j].dimension<<
"\"];" << std::endl;
1386 graphLayout <<
"}" << std::endl;
1387 misc::exec(std::string(
"dot -Tsvg > ") + _filename+
".svg", graphLayout.str());
1430 if(_format == FileFormat::TSV) {
1431 _stream << std::setprecision(std::numeric_limits<value_t>::digits10 + 1);
1434 write_to_stream<size_t>(_stream, 1, _format);
1438 if(_format == FileFormat::TSV) { _stream <<
'\n'; }
1442 write_to_stream<size_t>(_stream, el.
other, _format);
1443 write_to_stream<size_t>(_stream, el.
indexPosition, _format);
1444 write_to_stream<size_t>(_stream, el.
dimension, _format);
1446 if(_format == FileFormat::TSV) { _stream <<
"\n\n"; }
1449 write_to_stream<size_t>(_stream, _obj.
nodes.size(), _format);
1450 if(_format == FileFormat::TSV) { _stream <<
'\n'; }
1452 write_to_stream<size_t>(_stream, node.
neighbors.size(), _format);
1454 write_to_stream<bool>(_stream, link.
external, _format);
1455 write_to_stream<size_t>(_stream, link.
other, _format);
1456 write_to_stream<size_t>(_stream, link.
indexPosition, _format);
1457 write_to_stream<size_t>(_stream, link.
dimension, _format);
1460 if(_format == FileFormat::TSV) { _stream <<
'\n'; }
1465 if(_format == FileFormat::TSV) { _stream <<
'\n'; }
1470 IF_CHECK(
size_t ver = ) read_from_stream<size_t>(_stream, _format);
1471 REQUIRE(ver == 1,
"Unknown stream version to open (" << ver <<
")");
1480 el.
other = read_from_stream<size_t>(_stream, _format);
1481 el.
indexPosition = read_from_stream<size_t>(_stream, _format);
1482 el.
dimension = read_from_stream<size_t>(_stream, _format);
1486 _obj.
nodes.resize(read_from_stream<size_t>(_stream, _format));
1488 node.
neighbors.resize(read_from_stream<size_t>(_stream, _format));
1491 link.
external = read_from_stream<bool>(_stream, _format);
1492 link.
other = read_from_stream<size_t>(_stream, _format);
1493 link.
indexPosition = read_from_stream<size_t>(_stream, _format);
1494 link.
dimension = read_from_stream<size_t>(_stream, _format);
1501 read_from_stream<Tensor>(_stream, *node.
tensorObject, _format);
Header file for some additional math functions.
static void add_network_to_network(internal::IndexedTensorWritable< TensorNetwork > &&_base, internal::IndexedTensorReadOnly< TensorNetwork > &&_toInsert)
Inserts all nodes from _toInsert into _base, creating links where demanded by the indices...
void contract(const size_t _nodeId1, const size_t _nodeId2)
Contracts the nodes with indices _nodeId1 and _nodeId2.
TensorNetwork stripped_subnet(const std::function< bool(size_t)> &_idF=[](size_t){ return true;}) const
Creates a dataless copy of a subnet.
void draw(const std::string &_filename) const
Draws a graph representation of the TensorNetwork.
static void link_traces_and_fix(internal::IndexedTensorWritable< TensorNetwork > &&_base)
Finds traces defined by the indices and internally links the corresponding indices. Also applies all fixed indices.
Internal representation of an read and write and moveable indexed Tensor or TensorNetwork.
size_t degree() const
Returns the degree of the tensor.
void reshuffle(Tensor &_out, const Tensor &_base, const std::vector< size_t > &_shuffle)
: Performs a simple reshuffle. Much less powerful than a full evaluate, but more efficient...
void sanitize()
Removes all erased nodes from the TensorNetwork.
virtual void remove_slate(const size_t _mode, const size_t _slatePosition)
Removes the given _slatePosition from the _mode. This reduces the given dimension by one ...
const std::vector< ContractionHeuristic > contractionHeuristics
ZeroNode
Internal indicator to prevent the creation of a degree-zero node in the TensorNetwork constructor...
virtual TensorNetwork * get_copy() const
Returns a new copy of the network.
Header file for the Index class.
size_t dimension
Dimension of the link, always equals to other->tensorObject->dimensions[indexPosition].
std::pair< size_t, size_t > find_common_edge(const size_t _nodeA, const size_t _nodeB) const
Finds the position of a single common edge between two nodes.
Header file for the IndexedTensorMoveable class.
bool erased
Internal Flag.
virtual void contract_unconnected_subnetworks()
Contracts all nodes that are not connected to any external links.
Header file defining lists of indexed tensors.
Header file for the standard container support functions.
Very general class used to represent arbitrary tensor networks.
size_t size
Size of the Tensor – always equal to the product of the dimensions.
Internal representation of an readable indexed Tensor or TensorNetwork.
void stream_reader(std::istream &_stream, Tensor &_obj, const FileFormat _format)
tries to restore the tensor from a stream of data.
DimensionTuple dimensions
Vector containing the individual dimensions of the tensor.
Header file for the classes defining factorisations of Tensors.
void calculate_rq(Tensor &_R, Tensor &_Q, Tensor _input, const size_t _splitPos)
Low-Level RQ calculation of a given Tensor _input = _R _Q.
size_t datasize() const
Calculates the storage requirement of the current representation.
The TensorNode class is used by the class TensorNetwork to store the component tensors defining the network.
void calculate_qr(Tensor &_Q, Tensor &_R, Tensor _input, const size_t _splitPos)
Low-Level QR calculation of a given Tensor _input = _Q _R.
The main namespace of xerus.
Class that handles simple (non-decomposed) tensors in a dense or sparse representation.
bool links(const size_t _other) const noexcept
Checks whether this link links to a particular node.
Header file for templates to store and restore objects from / to files / streams. ...
std::string exec(const std::string &_cmd)
Execute a given command.
Tensor operator*(const value_t _factor, Tensor _tensor)
Calculates the entrywise multiplication of the Tensor _tensor with the constant _factor.
void perform_traces(const size_t _nodeId)
Performs all traces in the given node.
void modify_diagonal_entries(const std::function< void(value_t &)> &_f)
Modifies the diagonal entries according to the given function.
XERUS_force_inline void read_from_stream(std::istream &_stream, T &_obj, const FileFormat _format)
size_t size() const noexcept
size_t fixed_position() const
: Returns the fixed position of a fixed index.
Class representing a link from a TensorNode to another node or an external index. ...
void reduce_representation()
Contracts all nodes that are joined by a full-rank edge.
size_t degree() const
Gets the degree of the TensorNetwork.
value_t operator[](const size_t _position) const
Read the value at a specific position.
Header file for the Tensor class.
Header file for some helper functions.
Internal representation of an readable and writeable indexed Tensor or TensorNetwork.
void calculate_cq(Tensor &_C, Tensor &_Q, Tensor _input, const size_t _splitPos)
Low-Level CQ calculation of a given Tensor _input = _C _Q.
double contraction_cost(const size_t _nodeId1, const size_t _nodeId2) const
Approximates the cost of contraction two given nodes.
void contract(Tensor &_result, const Tensor &_lhs, const bool _lhsTrans, const Tensor &_rhs, const bool _rhsTrans, const size_t _numModes)
Low-level contraction between Tensors.
virtual bool specialized_contraction(std::unique_ptr< internal::IndexedTensorMoveable< TensorNetwork >> &_out, internal::IndexedTensorReadOnly< TensorNetwork > &&_me, internal::IndexedTensorReadOnly< TensorNetwork > &&_other) const
(Internal) Calculates the contraction between _me and _other and stores the result in _out...
FileFormat
possible file formats for tensor storage
virtual void require_correct_format() const
Sanity check for the TensorNetwork and if applicable for the specific format.
void reshuffle_nodes(const std::function< size_t(size_t)> &_f)
reshuffled the nodes according to the given function
virtual void resize_mode(const size_t _mode, const size_t _newDim, const size_t _cutPos=~0ul)
Resizes a specific mode of the TensorNetwork.
virtual bool specialized_sum(std::unique_ptr< internal::IndexedTensorMoveable< TensorNetwork >> &_out, internal::IndexedTensorReadOnly< TensorNetwork > &&_me, internal::IndexedTensorReadOnly< TensorNetwork > &&_other) const
(Internal) Calculates the sum between _me and _other and stores the result in _out. Requires that *this is the tensorObjectReadOnly of _me.
bool external
Flag indicating whether this link correspond to an external index.
size_t other
The index of the otherNode this Link links to.
TensorNetwork()
Constructs an order zero TensorNetwork.
Header file for comfort functions and macros that should not be exported in the library.
size_t degree() const noexcept
Header file for some elementary string manipulation routines.
void(* ContractionHeuristic)(double &, std::vector< std::pair< size_t, size_t >> &, TensorNetwork)
std::vector< Link > externalLinks
The open links of the network in order.
size_t indexPosition
IndexPosition on the other node or index of external index.
Header file for the class managing the contraction heuristics.
void require_valid_network(const bool _check_erased=true) const
Sanity checks the network.
Abstract internal representation of an read and writeable indexed Tensor or TensorNetwork.
Header file for shorthand notations that are xerus specific but used throughout the library...
double value_t
The type of values to be used by xerus.
Class used to represent indices that can be used to write tensor calculations in index notation...
XERUS_force_inline void write_to_stream(std::ostream &_stream, const T &_value, FileFormat _format)
void calculate_svd(Tensor &_U, Tensor &_S, Tensor &_Vt, Tensor _input, const size_t _splitPos, const size_t _maxRank, const value_t _eps)
Low-Level SVD calculation of a given Tensor _input = _U _S _Vt.
void stream_writer(std::ostream &_stream, const Tensor &_obj, const FileFormat _format)
pipes all information necessary to restore the current tensor into _stream.
bool approx_equal(const xerus::Tensor &_a, const xerus::Tensor &_b, const xerus::value_t _eps=EPSILON)
Checks whether two tensors are approximately equal.
std::unique_ptr< Tensor > tensorObject
Save slot for the tensorObject associated with this node.
virtual void operator/=(const value_t _divisor)
Performs the entrywise division by a constant _divisor.
virtual value_t frob_norm() const
Calculates the Frobenius norm of the TensorNetwork.
std::vector< Link > init_from_dimension_array()
: Sets the externalLinks and returns a Link vector for a node, assuming that this node is the only n...
virtual void transfer_core(const size_t _from, const size_t _to, const bool _allowRankReduction=true)
Transfers the core from one given node to another.
size_t span
The span states how many dimensions are covered by the index.
virtual void fix_mode(const size_t _mode, const size_t _slatePosition)
Fixes a specific mode to a specific value, effectively reducing the order by one. ...
std::vector< Link > neighbors
Vector of links defining the connection of this node to the network.
virtual void round_edge(const size_t _nodeA, const size_t _nodeB, const size_t _maxRank, const double _eps, const double _softThreshold)
Thresholds the rank between two given nodes.
bool fixed() const
Checks whether the Index represents a fixed number.
Header file for the TensorNetwork class.
void swap_external_links(const size_t _i, const size_t _j)
Swaps the external indices _i and _j, effectively changing those indices for the represented Tensor (...
std::vector< size_t > MultiIndex
: Representation of a MultiIndex, i.e. the tuple of positions for each dimension determining a single p...
internal::IndexedTensor< TensorNetwork > operator()(args... _args)
Indexes the TensorNetwork for read/write use.
virtual void operator*=(const value_t _factor)
Performs the entrywise multiplication with a constant _factor.
constexpr T pow(const T &_base, const uint32 _exp) noexcept
: Calculates _base^_exp by binary exponentiation
std::vector< TensorNode > nodes
The nodes constituting the network. The order determines the ids of the nodes.
internal::IndexedTensorMoveable< Tensor > operator/(internal::IndexedTensorReadOnly< Tensor > &&_b, internal::IndexedTensorReadOnly< Tensor > &&_A)
virtual void specialized_evaluation(internal::IndexedTensorWritable< TensorNetwork > &&_me, internal::IndexedTensorReadOnly< TensorNetwork > &&_other)
(Internal) Evaluates _other into _me. Requires that *this is the tensorObjectReadOnly of _me...
void calculate_qc(Tensor &_Q, Tensor &_C, Tensor _input, const size_t _splitPos)
Low-Level QC calculation of a given Tensor _input = _Q _C.
std::vector< size_t > dimensions
Dimensions of the external indices, i.e. the dimensions of the tensor represented by the network...