55 REQUIRE(size != 0, "May not create tensors with a dimension == 0.");
63 sparseData.reset(new std::map<size_t, value_t>());
70 REQUIRE(size != 0, "May not create tensors with a dimension == 0.");
75 value_t* const realData = denseData.get();
76 for (size_t i = 0; i < size; ++i) {
83 value_t* const realData = denseData.get();
84 for (size_t i = 0; i < size; ++i) {
91 value_t* const realData = denseData.get();
95 realData[idx] = _f(multIdx);
98 size_t changingIndex = degree()-1;
99 multIdx[changingIndex]++;
100 while(multIdx[changingIndex] == dimensions[changingIndex]) {
101 multIdx[changingIndex] = 0;
104 if(changingIndex >= degree()) { return; }
105 multIdx[changingIndex]++;
112 REQUIRE(_N <= size, "Cannot create more non zero entries than the dimension of the Tensor.");
113 for (size_t i = 0; i < _N; ++i) {
114 std::pair<size_t, value_t> entry = _f(i, size);
115 REQUIRE(entry.first < size, "Position is out of bounds " << entry.first);
116 REQUIRE(!misc::contains(*sparseData, entry.first), "Already contained " << entry.first);
117 sparseData->insert(std::move(entry));
125 for(size_t i = 0; i < ret.size; ++i) {
132 REQUIRE(_dimensions.size()%2 == 0, "Identity tensor must have even degree, here: " << _dimensions.size());
133 const size_t d = _dimensions.size();
141 bool notMultiLevelBreak = true;
142 while(notMultiLevelBreak) {
145 position[0]++; position[d/2]++;
148 position[node] = position[d/2+node] = 0;
150 if(node == d/2) { notMultiLevelBreak = false; break; }
151 position[node]++; position[d/2+node]++;
165 for(size_t i = 0; i < misc::min(ret.dimensions); ++i) {
175 ret[_position] = 1.0;
182 ret[_position] = 1.0;
213 #pragma GCC diagnostic push
214 #pragma GCC diagnostic ignored "-Wfloat-equal"
216 #pragma GCC diagnostic pop
234 return sparseData->size();
243 for(size_t i = 0; i < size; ++i) {
244 if(std::abs(denseData.get()[i]) > _eps ) { count++; }
249 for(const auto& entry : *sparseData) {
250 if(std::abs(entry.second) > _eps) { count++; }
258 for(size_t i = 0; i < size; ++i) {
259 if(!std::isfinite(denseData.get()[i])) { return false; }
262 for(const auto& entry : *sparseData) {
263 if(!std::isfinite(entry.second)) { return false; }
280 for(const auto& entry : *sparseData) {
281 norm += std::abs(entry.second);
283 return std::abs(factor)*norm;
291 for(const auto& entry : *sparseData) {
294 return std::abs(factor)*sqrt(norm);
300 plus_minus_equal<1>(*this, _other);
325 REQUIRE(_position < size, "Position " << _position << " does not exist in Tensor of dimensions " << dimensions);
330 return denseData.get()[_position];
332 value_t& result = (*sparseData)[_position];
335 return denseData.get()[_position];
342 REQUIRE(_position < size, "Position " << _position << " does not exist in Tensor of dimensions " << dimensions);
345 return factor*denseData.get()[_position];
347 const std::map<size_t, value_t>::const_iterator entry = sparseData->find(_position);
348 if(entry == sparseData->end()) {
351 return factor*entry->second;
366 REQUIRE(_position < size, "Position " << _position << " does not exist in Tensor of dimensions " << dimensions);
368 REQUIRE((is_dense() && denseData.unique()) || (is_sparse() && sparseData.unique()), "Data must be unique to call at().");
371 return denseData.get()[_position];
373 value_t& result = (*sparseData)[_position];
376 return denseData.get()[_position];
385 REQUIRE(_position < size, "Position " << _position << " does not exist in Tensor of dimensions " << dimensions);
389 return denseData.get()[_position];
391 const std::map<size_t, value_t>::const_iterator entry = sparseData->find(_position);
392 if(entry == sparseData->end()) {
395 return entry->second;
404 return denseData.get();
409 REQUIRE(is_dense(), "Unsanitized dense data requested, but representation is not dense!");
410 return denseData.get();
415 REQUIRE(is_dense(), "Unsanitized dense data requested, but representation is not dense!");
416 return denseData.get();
422 if(!denseData.unique()) {
427 return denseData.get();
432 REQUIRE(is_dense(), "Internal dense data requested, but representation is not dense!");
438 CHECK(is_sparse(), warning, "Request for sparse data although the Tensor is not sparse.");
441 return *sparseData.get();
446 REQUIRE(is_sparse(), "Unsanitized sparse data requested, but representation is not sparse!");
447 return *sparseData.get();
452 REQUIRE(is_sparse(), "Unsanitized sparse data requested, but representation is not sparse!");
453 return *sparseData.get();
459 if(sparseData.unique()) {
464 sparseData.reset(new std::map<size_t, value_t>());
468 return *sparseData.get();
473 REQUIRE(is_sparse(), "Internal sparse data requested, but representation is not sparse!");
501 const size_t oldDataSize = size;
508 if(oldDataSize != size || !denseData.unique()) {
522 if(sparseData.unique()) {
525 sparseData.reset(new std::map<size_t, value_t>());
529 sparseData.reset(new std::map<size_t, value_t>());
537 const size_t oldDataSize = size;
543 if(oldDataSize != size || !denseData.unique()) {
551 if(sparseData.unique()) {
554 sparseData.reset(new std::map<size_t, value_t>());
564 if(size != 1 || !denseData.unique()) {
567 *denseData.get() = 0.0;
569 if(sparseData.unique()) {
572 sparseData.reset(new std::map<size_t, value_t>());
589 denseData = _newData;
603 denseData.reset(_newData.release());
616 std::shared_ptr<std::map<size_t, value_t>> newD(new std::map<size_t, value_t>(std::move(_newData)));
617 sparseData = std::move(newD);
621 REQUIRE(misc::product(_newDimensions) == size, "New dimensions must not change the size of the tensor in reinterpretation: " << misc::product(_newDimensions) << " != " << size);
627 REQUIRE(_mode < degree(), "Can't resize mode " << _mode << " as the tensor is only order " << degree());
632 _cutPos = std::min(_cutPos, oldDim);
634 REQUIRE(_newDim > 0, "Dimension must be larger than 0! Is " << _newDim);
635 REQUIRE(_newDim > oldDim || _cutPos >= oldDim-_newDim, "Cannot remove " << oldDim-_newDim << " slates starting (exclusively) at position " << _cutPos);
638 const size_t oldStepSize = oldDim*dimStepSize;
639 const size_t newStepSize = _newDim*dimStepSize;
640 const size_t blockCount = size / oldStepSize;
641 const size_t newsize = blockCount*newStepSize;
644 std::unique_ptr<value_t[]> tmpData(new value_t[newsize]);
646 if (_newDim > oldDim) {
647 const size_t insertBlockSize = (_newDim-oldDim)*dimStepSize;
649 for (size_t i = 0; i < blockCount; ++i) {
650 value_t* const currData = tmpData.get()+i*newStepSize;
652 misc::copy(currData+insertBlockSize, denseData.get()+i*oldStepSize, oldStepSize);
654 } else if(_cutPos == oldDim) {
655 for (size_t i = 0; i < blockCount; ++i) {
656 value_t* const currData = tmpData.get()+i*newStepSize;
657 misc::copy(currData, denseData.get()+i*oldStepSize, oldStepSize);
661 const size_t preBlockSize = _cutPos*dimStepSize;
662 const size_t postBlockSize = (oldDim-_cutPos)*dimStepSize;
663 for (size_t i = 0; i < blockCount; ++i) {
664 value_t* const currData = tmpData.get()+i*newStepSize;
665 misc::copy(currData, denseData.get()+i*oldStepSize, preBlockSize);
667 misc::copy(currData+preBlockSize+insertBlockSize, denseData.get()+i*oldStepSize+preBlockSize, postBlockSize);
671 if (_cutPos < oldDim) {
672 const size_t removedSlates = (oldDim-_newDim);
673 const size_t removedBlockSize = removedSlates*dimStepSize;
674 const size_t preBlockSize = (_cutPos-removedSlates)*dimStepSize;
675 const size_t postBlockSize = (oldDim-_cutPos)*dimStepSize;
677 INTERNAL_CHECK(removedBlockSize+preBlockSize+postBlockSize == oldStepSize && preBlockSize+postBlockSize == newStepSize, "IE");
679 for (size_t i = 0; i < blockCount; ++i) {
680 value_t* const currData = tmpData.get()+i*newStepSize;
681 misc::copy(currData, denseData.get()+i*oldStepSize, preBlockSize);
682 misc::copy(currData+preBlockSize, denseData.get()+i*oldStepSize+preBlockSize+removedBlockSize, postBlockSize);
685 for (size_t i = 0; i < blockCount; ++i) {
686 misc::copy(tmpData.get()+i*newStepSize, denseData.get()+i*oldStepSize, newStepSize);
693 std::unique_ptr<std::map<size_t, value_t>> tmpData(new std::map<size_t, value_t>());
695 if (_newDim > oldDim) {
696 const size_t slatesAdded = _newDim-oldDim;
697 for(const auto& entry : *sparseData.get()) {
699 const size_t k = entry.first%dimStepSize;
700 const size_t j = (entry.first%oldStepSize)/dimStepSize;
701 const size_t i = entry.first/oldStepSize;
704 tmpData->emplace(i*newStepSize+j*dimStepSize+k, entry.second);
706 tmpData->emplace(i*newStepSize+(j+slatesAdded)*dimStepSize+k, entry.second);
710 const size_t slatesRemoved = oldDim-_newDim;
711 for(const auto& entry : *sparseData.get()) {
713 const size_t k = entry.first%dimStepSize;
714 const size_t j = (entry.first%oldStepSize)/dimStepSize;
715 const size_t i = entry.first/oldStepSize;
717 if(j < _cutPos-slatesRemoved) {
718 tmpData->emplace(i*newStepSize+j*dimStepSize+k, entry.second);
719 } else if(j >= _cutPos) {
720 tmpData->emplace(i*newStepSize+(j-slatesRemoved)*dimStepSize+k, entry.second);
724 sparseData.reset(tmpData.release());
733 REQUIRE(_slatePosition < dimensions[_mode], "The given slatePosition must be smaller than the corresponding dimension. Here " << _slatePosition << " >= " << dimensions[_mode] << ", dim = " << dimensions << "=" << size << " mode " << _mode);
735 const size_t stepCount = misc::product(dimensions, 0, _mode);
737 const size_t stepSize = dimensions[_mode]*blockSize;
738 const size_t slateOffset = _slatePosition*blockSize;
741 std::unique_ptr<value_t[]> tmpData(new value_t[stepCount*blockSize]);
744 for(size_t i = 0; i < stepCount; ++i) {
745 misc::copy(tmpData.get()+i*blockSize, denseData.get()+i*stepSize+slateOffset, blockSize);
750 std::unique_ptr<std::map<size_t, value_t>> tmpData(new std::map<size_t, value_t>());
752 for(const auto& entry : *sparseData.get()) {
754 const size_t k = entry.first%blockSize;
755 const size_t j = (entry.first%stepSize)/blockSize;
756 const size_t i = entry.first/stepSize;
758 if(j == _slatePosition) {
759 tmpData->emplace(i*blockSize+k, entry.second);
763 sparseData.reset(tmpData.release());
768 size = stepCount*blockSize;
782 REQUIRE(_firstMode != _secondMode, "Given indices must not coincide");
785 if(_firstMode > _secondMode) { std::swap(_firstMode, _secondMode); }
789 const size_t front = misc::product(dimensions, 0, _firstMode);
790 const size_t mid = misc::product(dimensions, _firstMode+1, _secondMode);
792 const size_t traceDim = dimensions[_firstMode];
793 const size_t frontStepSize = traceDim*mid*traceDim*back;
794 const size_t traceStepSize = mid*traceDim*back+back;
795 const size_t midStepSize = traceDim*back;
797 size = front*mid*back;
800 std::unique_ptr<value_t[]> newData(new value_t[size]);
803 for(size_t f = 0; f < front; ++f) {
804 for(size_t t = 0; t < traceDim; ++t) {
805 for(size_t m = 0; m < mid; ++m) {
806 misc::add_scaled(newData.get()+(f*mid+m)*back, factor, denseData.get()+f*frontStepSize+t*traceStepSize+m*midStepSize, back);
813 std::unique_ptr<std::map<size_t, value_t>> newData(new std::map<size_t, value_t>());
815 for(const auto& entry : *sparseData) {
816 size_t pos = entry.first;
817 const size_t backIdx = pos%back;
819 const size_t traceBackIdx = pos%traceDim;
821 const size_t midIdx = pos%mid;
823 const size_t traceFrontIdx = pos%traceDim;
825 const size_t frontIdx = pos;
827 if(traceFrontIdx == traceBackIdx) {
828 (*newData)[(frontIdx*mid + midIdx)*back + backIdx] += factor*entry.second;
832 sparseData.reset(newData.release());
848 for(size_t i = 1; i < degree(); ++i) {
853 const size_t numDiags = misc::min(dimensions);
855 for(size_t i = 0; i < numDiags; ++i) {
869 for(size_t i = 1; i < degree(); ++i) {
874 const size_t numDiags = misc::min(dimensions);
876 for(size_t i = 0; i < numDiags; ++i) {
877 _f(at(i*stepSize), i);
886 for(size_t i = 0; i < size; ++i) { _f(at(i)); }
888 for(size_t i = 0; i < size; ++i) {
895 IF_CHECK(size_t succ = ) sparseData->erase(i);
906 for(size_t i = 0; i < size; ++i) { _f(at(i), i); }
908 for(size_t i = 0; i < size; ++i) {
915 IF_CHECK(size_t succ = ) sparseData->erase(i);
930 _f(at(idx), multIdx);
938 IF_CHECK(size_t succ = ) sparseData->erase(idx);
945 size_t changingIndex = degree()-1;
946 multIdx[changingIndex]++;
947 while(multIdx[changingIndex] == dimensions[changingIndex]) {
948 multIdx[changingIndex] = 0;
951 if(changingIndex >= degree()) { return; }
952 multIdx[changingIndex]++;
959 std::vector<size_t> stepSizes(_dimensions.size());
960 if(!_dimensions.empty()) {
961 stepSizes.back() = 1;
962 for(size_t i = stepSizes.size(); i > 1; --i) {
963 stepSizes[i-2] = stepSizes[i-1]*_dimensions[i-1];
972 REQUIRE(degree() == _offsets.size(), "Degrees of this tensor and number of given offsets must coincide. Here " << degree() << " vs " << _offsets.size());
973 for(size_t d = 0; d < degree(); ++d) {
974 REQUIRE(dimensions[d] >= _offsets[d]+_other.dimensions[d], "Invalid offset/tensor dimension for dimension " << d << " because this dimension is " << dimensions[d] << " but other tensor dimension + offset is " << _offsets[d]+_other.dimensions[d]);
985 for(size_t d = 0; d < degree(); ++d) {
986 offset += _offsets[d]*stepSizes[d];
989 const size_t blockSize = _other.dimensions.back();
995 for(size_t i = 1; i < _other.size/blockSize; ++i) {
996 size_t index = degree()-2;
998 inPosition += blockSize;
999 outPosition += stepSizes[index];
1000 while(i%multStep == 0) {
1001 outPosition -= dimensions[index]*stepSizes[index];
1003 outPosition += stepSizes[index];
1015 dataPtr[newPos] += _other.factor*entry.second;
1021 data[newPos] += _other.factor*entry.second;
1032 for(const auto& entry : *sparseData) {
1033 denseData.get()[entry.first] = factor*entry.second;
1052 sparseData.reset(new std::map<size_t, value_t>());
1053 for(size_t i = 0; i < size; ++i) {
1054 if(std::abs(factor*denseData.get()[i]) >= _eps) {
1055 sparseData->emplace_hint(sparseData->end(), i, factor*denseData.get()[i]);
1071 for (size_t i = 0; i < size; ++i) {
1104 _me.sparseData.reset();
1115 for(const auto& entry : *_sparseData) {
1116 _denseData.get()[entry.first] += _factor*entry.second;
1122 for(const auto& entry : *_summand) {
1123 std::pair<std::map<size_t, value_t>::iterator, bool> result = _sum->emplace(entry.first, _factor*entry.second);
1124 if(!result.second) {
1125 result.first->second += _factor*entry.second;
1132 REQUIRE(_multiIndex.size() == _dimensions.size(), "MultiIndex has wrong degree. Given " << _multiIndex.size() << ", expected " << _dimensions.size());
1134 size_t finalIndex = 0;
1135 for(size_t i = 0; i < _multiIndex.size(); ++i) {
1136 REQUIRE(_multiIndex[i] < _dimensions[i], "Index " << i << " out of bounds " << _multiIndex[i] << " >=! " << _dimensions[i]);
1137 finalIndex *= _dimensions[i];
1138 finalIndex += _multiIndex[i];
1145 REQUIRE(_position < misc::product(_dimensions), "Invalid position " << _position << " given. Max size is : " << misc::product(_dimensions));
1148 for(size_t i = 0; i < _dimensions.size(); ++i) {
1149 const size_t k = _dimensions.size() - 1 - i;
1150 index[k] = _position%_dimensions[k];
1151 _position /= _dimensions[k];
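As a small worked example of this row-major mapping (dimensions chosen purely for illustration): for _dimensions = {2, 3, 4}, multiIndex_to_position folds the multi-index {1, 2, 3} into ((1*3)+2)*4+3 = 23, while position_to_multiIndex unfolds position 23 again as 23%4 = 3, (23/4)%3 = 2 and (23/12)%2 = 1.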
1160 if(!denseData.unique()) {
1161 value_t* const oldDataPtr = denseData.get();
1166 if(!sparseData.unique()) {
1167 sparseData.reset(new std::map<size_t, value_t>(*sparseData));
1175 if(!denseData.unique()) {
1179 if(!sparseData.unique()) {
1180 sparseData.reset(new std::map<size_t, value_t>());
1189 if(denseData.unique()) {
1192 value_t* const oldDataPtr = denseData.get();
1197 if(!sparseData.unique()) {
1198 sparseData.reset(new std::map<size_t, value_t>(*sparseData));
1201 for(std::pair<const size_t, value_t>& entry : *sparseData) {
1246 _tensor /= _divisor;
1253 REQUIRE(_numModes <= _lhs.degree() && _numModes <= _rhs.degree(), "Cannot contract more indices than both tensors have. we have: " << _lhs.degree() << " and " << _rhs.degree() << " but want to contract: " << _numModes);
1256 const size_t lhsRemainOrder = _lhs.degree() - _numModes;
1257 const size_t lhsRemainStart = _lhsTrans ? _numModes : 0;
1258 const size_t lhsContractStart = _lhsTrans ? 0 : lhsRemainOrder;
1259 const size_t lhsRemainEnd = lhsRemainStart + lhsRemainOrder;
1261 const size_t rhsRemainOrder = _rhs.degree() - _numModes;
1262 const size_t rhsRemainStart = _rhsTrans ? 0 : _numModes;
1263 IF_CHECK(const size_t rhsContractStart = _rhsTrans ? rhsRemainOrder : 0;)
1264 const size_t rhsRemainEnd = rhsRemainStart + rhsRemainOrder;
1267 "Dimensions of the be contracted indices do not coincide. " <<_lhs.
dimensions <<
" ("<<_lhsTrans<<
") and " << _rhs.
dimensions <<
" ("<<_rhsTrans<<
") with " << _numModes);
1269 const size_t leftDim = misc::product(_lhs.
dimensions, lhsRemainStart, lhsRemainEnd);
1270 const size_t midDim = misc::product(_lhs.
dimensions, lhsContractStart, lhsContractStart+_numModes);
1271 const size_t rightDim = misc::product(_rhs.
dimensions, rhsRemainStart, rhsRemainEnd);
1274 const size_t finalSize = leftDim*rightDim;
1275 const size_t sparsityExpectation = size_t(double(finalSize)*(1.0 - misc::pow(1.0 - double(_lhs.sparsity()*_rhs.sparsity())/(double(_lhs.size)*double(_rhs.size)), midDim)));
1278 const bool sparseResult = (_lhs.is_sparse() && _rhs.is_sparse()) || (finalSize > 64 && Tensor::sparsityFactor*sparsityExpectation < finalSize*2);
1283 std::unique_ptr<Tensor> tmpResult;
1285 if( &_result == &_lhs || &_result == &_rhs
1287 || _result.degree() != lhsRemainOrder + rhsRemainOrder
1288 || _result.size != leftDim*rightDim
1290 || !std::equal(_rhs.dimensions.begin() + rhsRemainStart, _rhs.dimensions.begin() + rhsRemainEnd, _result.dimensions.begin() + (lhsRemainEnd-lhsRemainStart))
1293 resultDim.reserve(lhsRemainOrder + rhsRemainOrder);
1294 resultDim.insert(resultDim.end(), _lhs.dimensions.begin() + lhsRemainStart, _lhs.dimensions.begin() + lhsRemainEnd);
1295 resultDim.insert(resultDim.end(), _rhs.dimensions.begin() + rhsRemainStart, _rhs.dimensions.begin() + rhsRemainEnd);
1297 if(&_result == &_lhs || &_result == &_rhs) {
1299 usedResult = tmpResult.get();
1302 usedResult = &_result;
1305 usedResult = &_result;
1350 _result = std::move(*tmpResult);
1356 contract(result, _lhs, _lhsTrans, _rhs, _rhsTrans, _numModes);
1362 REQUIRE(_splitPos <= _input.degree(), "Split position must be in range.");
1364 const size_t lhsSize = misc::product(_input.dimensions, 0, _splitPos);
1365 const size_t rhsSize = misc::product(_input.dimensions, _splitPos, _input.degree());
1366 const size_t rank = std::min(lhsSize, rhsSize);
1368 return std::make_tuple(lhsSize, rhsSize, rank);
1376 || _lhs.degree() != _splitPos+1
1381 newDimU.insert(newDimU.end(), _input.dimensions.begin(), _input.dimensions.begin() + _splitPos);
1382 newDimU.push_back(_rank);
1392 newDimVt.push_back(_rank);
1393 newDimVt.insert(newDimVt.end(), _input.dimensions.begin() + _splitPos, _input.dimensions.end());
1399 std::unique_ptr<value_t[]>&& _rhsData, const Tensor& _input, const size_t _splitPos, const size_t _rank) {
1402 newDim.push_back(_rank);
1403 _lhs.reset(std::move(newDim), std::move(_lhsData));
1406 newDim.push_back(_rank);
1408 _rhs.reset(std::move(newDim), std::move(_rhsData));
1412 std::map<size_t, value_t>&& _rhsData, const Tensor& _input, const size_t _splitPos, const size_t _rank) {
1415 newDim.push_back(_rank);
1416 _lhs.reset(std::move(newDim), std::move(_lhsData));
1419 newDim.push_back(_rank);
1421 _rhs.reset(std::move(newDim), std::move(_rhsData));
1425 REQUIRE(0 <= _eps && _eps < 1, "Epsilon must fulfill 0 <= _eps < 1.");
1427 size_t lhsSize, rhsSize, rank;
1430 std::unique_ptr<value_t[]> tmpS(new value_t[rank]);
1436 size_t min = std::min(lhsSize, rhsSize);
1437 size_t max = std::max(lhsSize, rhsSize);
1465 rank = std::min(rank, _maxRank);
1469 for(size_t j = 1; j < rank; ++j) {
1470 if (tmpS[j] <= _eps*tmpS[0]) {
1478 for(size_t i = 0; i < rank; ++i) {
1479 _S[i*rank+i] = std::abs(_input.factor)*tmpS[i];
1483 if(_input.factor < 0.0) {
1493 size_t lhsSize, rhsSize, rank;
1496 std::map<size_t, double> qdata, rdata;
1498 INTERNAL_CHECK(rank == std::min(lhsSize, rhsSize), "IE, sparse qr reduced rank");
1512 size_t lhsSize, rhsSize, rank;
1517 LOG_ONCE(warning, "Sparse RQ not yet implemented. falling back to the dense variant");
1529 size_t lhsSize, rhsSize, rank;
1533 std::map<size_t, double> qdata, cdata;
1539 std::unique_ptr<double[]> QData, CData;
1549 size_t lhsSize, rhsSize, rank;
1553 std::map<size_t, double> qdata, cdata;
1559 std::unique_ptr<double[]> CData, QData;
1572 contract(_inverse, Vt, true, S, false, 1);
1573 contract(_inverse, _inverse, false, U, true, 1);
1584 REQUIRE(&_X != &_B && &_X != &_A, "Not supported yet");
1585 const size_t degM = _B.degree() - _extraDegree;
1586 const size_t degN = _A.degree() - degM;
1588 REQUIRE(_A.degree() == degM+degN, "Inconsistent dimensions.");
1589 REQUIRE(_B.degree() == degM+_extraDegree, "Inconsistent dimensions.");
1592 if( _X.degree() != degN + _extraDegree
1604 const size_t m = misc::product(_A.dimensions, 0, degM);
1605 const size_t n = misc::product(_A.dimensions, degM, degM+degN);
1606 const size_t p = misc::product(_B.dimensions, degM, degM+_extraDegree);
1609 REQUIRE( p == 1, "Matrix least squares not supported in sparse");
1635 LOG_ONCE(warning, "Sparse RHS not yet implemented (casting to dense)");
1659 REQUIRE(&_X != &_B && &_X != &_A, "Not supported yet");
1660 const size_t degM = _B.degree() - _extraDegree;
1661 const size_t degN = _A.degree() - degM;
1663 REQUIRE(_A.degree() == degM+degN, "Inconsistent dimensions.");
1664 REQUIRE(_B.degree() == degM+_extraDegree, "Inconsistent dimensions.");
1667 if( _X.degree() != degN + _extraDegree
1678 const size_t m = misc::product(_A.dimensions, 0, degM);
1679 const size_t n = misc::product(_A.dimensions, degM, degM+degN);
1680 const size_t p = misc::product(_B.dimensions, degM, degM+_extraDegree);
1690 LOG_ONCE(warning, "Sparse RHS not yet implemented (casting to dense)");
1716 for(size_t i = 0; i < result.size; ++i) {
1717 dataPtrA[i] *= dataPtrB[i];
1724 for(std::pair<const size_t, value_t>& entry : result.get_sparse_data()) {
1725 entry.second *= _B[entry.first];
1732 for(std::pair<const size_t, value_t>& entry : result.get_sparse_data()) {
1733 entry.second *= _A[entry.first];
1740 "The dimensions of the compared tensors don't match: " << _a.
dimensions <<
" vs. " << _b.
dimensions <<
" and " << _a.
size <<
" vs. " << _b.
size);
1747 "The dimensions of the compared tensors don't match: " << _a.
dimensions <<
" vs. " << _b.
dimensions <<
" and " << _a.
size <<
" vs. " << _b.
size);
1758 for(size_t i = 0; i < _a.size; ++i) {
1766 if(_tensor.size != _values.size()) { return false; }
1767 for(size_t i = 0; i < _tensor.size; ++i) {
1782 if(_format == FileFormat::TSV) {
1783 _stream << std::setprecision(std::numeric_limits<value_t>::digits10 + 1);
1787 write_to_stream<size_t>(_stream, 1, _format);
1792 write_to_stream<size_t>(_stream, 1, _format);
1793 for (size_t i = 0; i < _obj.size; ++i) {
1794 write_to_stream<value_t>(_stream, _obj[i], _format);
1797 write_to_stream<size_t>(_stream, 2, _format);
1800 write_to_stream<size_t>(_stream, d.first, _format);
1801 write_to_stream<value_t>(_stream, _obj.factor*d.second, _format);
1808 IF_CHECK(size_t ver = ) read_from_stream<size_t>(_stream, _format);
1809 REQUIRE(ver == 1, "Unknown stream version to open (" << ver << ")");
1812 std::vector<size_t> dims;
1816 const size_t rep = read_from_stream<size_t>(_stream, _format);
1821 if(_format == FileFormat::TSV) {
1822 for (size_t i = 0; i < _obj.size; ++i) {
1828 REQUIRE(_stream, "Unexpected end of stream in reading dense Tensor.");
1830 REQUIRE(rep == 2, "Unknown tensor representation " << rep << " in stream");
1833 const uint64 num = read_from_stream<size_t>(_stream, _format);
1834 REQUIRE(num < std::numeric_limits<size_t>::max(), "The stored Tensor is too large to be loaded using 32 Bit xerus.");
1836 for (size_t i = 0; i < num; ++i) {
1837 REQUIRE(_stream, "Unexpected end of stream in reading sparse Tensor.");
1839 uint64 pos = read_from_stream<size_t>(_stream, _format);
1840 value_t val = read_from_stream<value_t>(_stream, _format);
1843 REQUIRE(_stream, "Unexpected end of stream in reading dense Tensor.");
Header file for some additional math functions.
std::tuple< std::unique_ptr< double[]>, std::unique_ptr< double[]>, size_t > cq(const double *const _A, const size_t _m, const size_t _n)
: splits A = C*Q, with _C an rxm matrix (where r is the rank of _A) and _Q orthogonal.
Initialisation
Flags determining the initialisation of the data of Tensor objects.
Header file for CHECK and REQUIRE macros.
void use_dense_representation_if_desirable()
Converts the Tensor to a dense representation if sparsity * sparsityFactor >= size.
std::tuple< std::unique_ptr< double[]>, std::unique_ptr< double[]>, size_t > qc(const double *const _A, const size_t _m, const size_t _n)
: splits A = Q*C, with _C an rxn matrix (where r is the rank of _A) and _Q orthogonal.
Header file for the blas and lapack wrapper functions.
Tensor sparse_copy() const
Returns a copy of this Tensor that uses a sparse representation.
value_t frob_norm() const
Calculates the Frobenius norm of the tensor.
value_t factor
Single value representing a constant scaling factor.
static size_t sparsityFactor
size_t degree() const
Returns the degree of the tensor.
void ensure_own_data()
Ensures that this tensor is the sole owner of its data. If needed new space is allocated and all entr...
void pseudo_inverse(Tensor &_inverse, const Tensor &_input, const size_t _splitPos)
Low-Level calculation of the pseudo inverse of a given Tensor.
static void add_sparse_to_sparse(const std::shared_ptr< std::map< size_t, value_t >> &_sum, const value_t _factor, const std::shared_ptr< const std::map< size_t, value_t >> &_summand)
Adds the given sparse data to the given sparse data.
void qr(double *const _Q, double *const _R, const double *const _A, const size_t _m, const size_t _n)
: Performs (Q,R) = QR(A)
static Tensor XERUS_warn_unused identity(DimensionTuple _dimensions)
: Returns a Tensor representation of the identity operator with the given dimensions.
Header file for the Index class.
Header file for the standard container support functions.
std::string to_string() const
Creates a string representation of the Tensor.
XERUS_force_inline std::tuple< size_t, size_t, size_t > calculate_factorization_sizes(const Tensor &_input, const size_t _splitPos)
Very general class used to represent arbitrary tensor networks.
size_t size
Size of the Tensor – always equal to the product of the dimensions.
Internal representation of a readable indexed Tensor or TensorNetwork.
Tensor & operator=(const Tensor &_other)=default
Standard assignment operator.
void stream_reader(std::istream &_stream, Tensor &_obj, const FileFormat _format)
tries to restore the tensor from a stream of data.
value_t one_norm() const
Calculates the 1-norm of the tensor.
void matrix_matrix_product(double *const _C, const size_t _leftDim, const size_t _rightDim, const double _alpha, const double *const _A, const size_t _lda, const bool _transposeA, const size_t _middleDim, const double *const _B, const size_t _ldb, const bool _transposeB)
: Performs the Matrix-Matrix product C = alpha*OP(A) * OP(B)
DimensionTuple dimensions
Vector containing the individual dimensions of the tensor.
void calculate_rq(Tensor &_R, Tensor &_Q, Tensor _input, const size_t _splitPos)
Low-Level RQ calculation of a given Tensor _input = _R _Q.
void calculate_qr(Tensor &_Q, Tensor &_R, Tensor _input, const size_t _splitPos)
Low-Level QR calculation of a given Tensor _input = _Q _R.
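A minimal usage sketch for calculate_qr; the dimensions, the tensor contents and the single public header xerus.h are assumptions made for illustration only:

    #include <xerus.h>

    void qr_example() {
        const xerus::Tensor A = xerus::Tensor::ones({2, 3, 4});  // any order-3 tensor
        xerus::Tensor Q, R;
        xerus::calculate_qr(Q, R, A, 2);   // split modes {0,1} from mode {2}, so that A = Q R
        // Q has dimensions {2, 3, r} and R has dimensions {r, 4} with r <= min(2*3, 4).
    }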
Tensor(const Representation _representation=Representation::Sparse)
Constructs an order zero Tensor with the given initial representation.
bool approx_equal(T _a, T _b, T _eps=4 *std::numeric_limits< T >::epsilon()) noexcept
: Checks whether the relative difference between _a and _b (i.e. |a-b|/(|a|/2+|b|/2)) is smaller than...
The main namespace of xerus.
Class that handles simple (non-decomposed) tensors in a dense or sparse representation.
value_t * get_dense_data()
Returns a pointer for direct access to the dense data array in row major order.
void resize_mode(const size_t _mode, const size_t _newDim, size_t _cutPos=~0ul)
Resizes a specific mode of the Tensor.
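A usage sketch for resize_mode, with made-up dimensions and assuming the public header xerus.h:

    #include <xerus.h>

    void resize_example() {
        xerus::Tensor T = xerus::Tensor::ones({2, 3, 4});
        T.resize_mode(1, 5);      // grow mode 1 from dimension 3 to 5; T.dimensions becomes {2, 5, 4}
        T.resize_mode(1, 3, 3);   // shrink mode 1 back to 3, removing the two slates directly before position 3
    }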
value_t cat(const size_t _position) const
Unsanitized read access to a single entry.
value_t & operator[](const size_t _position)
Read/Write access a single entry.
void fix_mode(const size_t _mode, const size_t _slatePosition)
Fixes a specific mode to a specific value, effectively reducing the order by one. ...
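A usage sketch for fix_mode (illustrative values, xerus.h assumed):

    #include <xerus.h>

    void fix_mode_example() {
        xerus::Tensor T = xerus::Tensor::ones({2, 3, 4});
        T.fix_mode(1, 2);   // keep only slate 2 of mode 1
        // T now has dimensions {2, 4}; its order dropped from 3 to 2.
    }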
std::map< size_t, value_t > & get_unsanitized_sparse_data()
Gives access to the internal sparse map, without any checks.
constexpr bool hard_not_equal(const T _a, const T _b) noexcept
: Checks for hard inequality ( != operator) without compiler warnings.
internal::IndexedTensor< Tensor > operator()(args... _args)
Indexes the Tensor for read/write use.
XERUS_force_inline void prepare_factorization_output(Tensor &_lhs, Tensor &_rhs, const Tensor &_input, const size_t _splitPos, const size_t _rank, Tensor::Representation _rep)
const std::shared_ptr< value_t > & get_internal_dense_data()
Gives access to the internal shared data pointer, without any checks.
void ensure_own_data_and_apply_factor()
Checks whether there is a non-trivial factor and applies it. Even if no factor is applied ensure_own_...
Tensor operator*(const value_t _factor, Tensor _tensor)
Calculates the entrywise multiplication of the Tensor _tensor with the constant _factor.
Tensor & operator/=(const value_t _divisor)
Performs the entrywise division by a constant _divisor.
std::vector< size_t > DimensionTuple
: Representation of the dimensions of a Tensor.
void ensure_own_data_no_copy()
Ensures that this tensor is the sole owner of its data space. If needed new space is allocated with e...
std::vector< size_t > get_step_sizes(const Tensor::DimensionTuple &_dimensions)
void apply_factor()
Checks whether there is a non-trivial scaling factor and applies it if necessary. ...
void array_deleter_vt(value_t *const _toDelete)
Internal deleter function, needed because std::shared_ptr misses an array overload.
void modify_diagonal_entries(const std::function< void(value_t &)> &_f)
Modifies the diagonal entries according to the given function.
XERUS_force_inline void read_from_stream(std::istream &_stream, T &_obj, const FileFormat _format)
static Tensor XERUS_warn_unused ones(DimensionTuple _dimensions)
: Returns a Tensor with all entries equal to one.
std::ostream & operator<<(std::ostream &_out, const xerus::Index &_idx)
Allows to pretty print indices, giving the valueId and span.
void reinterpret_dimensions(DimensionTuple _newDimensions)
Reinterprets the dimensions of the tensor.
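A usage sketch for reinterpret_dimensions (illustrative values, xerus.h assumed); the product of the new dimensions must equal the old size, otherwise the REQUIRE shown in the source fires:

    #include <xerus.h>

    void reshape_example() {
        xerus::Tensor T = xerus::Tensor::ones({2, 3, 4});
        T.reinterpret_dimensions({6, 4});   // the same 24 entries, reread as a 6x4 matrix
    }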
void solve(internal::IndexedTensorWritable< Tensor > &&_x, internal::IndexedTensorReadOnly< Tensor > &&_A, internal::IndexedTensorReadOnly< Tensor > &&_b)
static Tensor XERUS_warn_unused kronecker(DimensionTuple _dimensions)
: Returns a Tensor representation of the kronecker delta.
void rq(double *const _R, double *const _Q, const double *const _A, const size_t _m, const size_t _n)
: Performs (R,Q) = RQ(A)
size_t reorder_cost() const
Approximates the cost to reorder the tensor.
Header file for the Tensor class.
Internal representation of a readable and writeable indexed Tensor or TensorNetwork.
void calculate_cq(Tensor &_C, Tensor &_Q, Tensor _input, const size_t _splitPos)
Low-Level CQ calculation of a given Tensor _input = _C _Q.
static Tensor XERUS_warn_unused dirac(DimensionTuple _dimensions, const MultiIndex &_position)
: Returns a Tensor with a single entry equals one and all other zero.
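A sketch of the factory functions listed here; all dimensions and positions are illustrative, and xerus.h is assumed as the public header:

    #include <xerus.h>

    void factory_example() {
        const xerus::Tensor I = xerus::Tensor::identity({3, 3});            // identity operator; requires even degree
        const xerus::Tensor K = xerus::Tensor::kronecker({3, 3, 3});        // 1 exactly where all indices coincide
        const xerus::Tensor D = xerus::Tensor::dirac({2, 3, 4}, {1, 2, 3}); // single 1 at multi-index {1, 2, 3}
        const xerus::Tensor O = xerus::Tensor::ones({2, 2});                // all entries equal to one
    }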
double one_norm(const double *const _x, const size_t _n)
: Computes the one norm =||x||_1
void contract(Tensor &_result, const Tensor &_lhs, const bool _lhsTrans, const Tensor &_rhs, const bool _rhsTrans, const size_t _numModes)
Low-level contraction between Tensors.
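A usage sketch for the low-level contract declared above; tensor names and dimensions are made up, and xerus.h is assumed:

    #include <xerus.h>

    void contract_example() {
        const xerus::Tensor A = xerus::Tensor::ones({3, 4});
        const xerus::Tensor B = xerus::Tensor::ones({4, 5});
        xerus::Tensor C;
        xerus::contract(C, A, false, B, false, 1);   // C(i,k) = sum_j A(i,j) B(j,k); C gets dimensions {3, 5}
    }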
void reset()
Resets the tensor as if default initialized.
static void add_sparse_to_full(const std::shared_ptr< value_t > &_denseData, const value_t _factor, const std::shared_ptr< const std::map< size_t, value_t >> &_sparseData)
Adds the given sparse data to the given full data.
FileFormat
possible file formats for tensor storage
Tensor & operator*=(const value_t _factor)
Performs the entrywise multiplication with a constant _factor.
size_t count_non_zero_entries(const value_t _eps=std::numeric_limits< value_t >::epsilon()) const
Determines the number of non-zero entries.
void copy_scaled(T *const __restrict _x, const T _alpha, const T *const _y, const size_t _n) noexcept
Copies _n entries from _y to _x, simultaneously scaling each entry by the factor _alpha. I.e. x = alpha*y.
static std::tuple< std::map< size_t, double >, std::map< size_t, double >, size_t > cq(const std::map< size_t, double > &_A, const bool _transposeA, size_t _m, size_t _n, bool _fullrank)
calculates the sparse CQ decomposition
bool has_factor() const
Checks whether the tensor has a non-trivial global scaling factor.
void solve(double *_x, const double *_A, size_t _m, size_t _n, const double *_b, size_t _p)
: Solves Ax = b for x
Tensor operator-(Tensor _lhs, const Tensor &_rhs)
Calculates the entrywise difference between _lhs and _rhs.
Tensor & operator+=(const Tensor &_other)
Adds the _other Tensor entrywise to this one.
void reset(DimensionTuple _newDim, const Representation _representation, const Initialisation _init=Initialisation::Zero)
Resets the tensor to the given dimensions and representation.
static size_t multiIndex_to_position(const MultiIndex &_multiIndex, const DimensionTuple &_dimensions)
Header file for the low level array support function.
Header file for comfort functions and macros that should not be exported in the library.
Header file for sparse matrix times dense matrix wrapper functions.
constexpr const value_t EPSILON
The default epsilon value in xerus.
Tensor operator+(Tensor _lhs, const Tensor &_rhs)
Calculates the entrywise sum of _lhs and _rhs.
void scale(T *const __restrict _x, const T _alpha, const size_t _n) noexcept
Scales _n entries of _x by the factor _alpha. I.e. x = alpha*x.
std::map< size_t, value_t > & override_sparse_data()
Returns a pointer to the internal sparse data map for complete rewrite purpose ONLY.
bool all_entries_valid() const
Checks the tensor for illegal entries, e.g. nan, inf,...
static void solve_dense_rhs(double *_x, size_t _xDim, const std::map< size_t, double > &_A, const bool _transposeA, const double *_b, size_t _bDim)
solve operator / for dense right hand sides
const std::shared_ptr< std::map< size_t, value_t > > & get_internal_sparse_data()
Gives access to the internal shared sparse data pointer, without any checks.
double two_norm(const double *const _x, const size_t _n)
: Computes the two norm =||x||_2
constexpr T sqr(const T &_a) noexcept
: Calculates _a*_a.
value_t & at(const size_t _position)
Unsanitized access to a single entry.
Header file for suitesparse wrapper functions.
static void matrix_matrix_product(std::map< size_t, double > &_C, const size_t _leftDim, const size_t _rightDim, const double _alpha, const std::map< size_t, double > &_A, const bool _transposeA, const size_t _midDim, const std::map< size_t, double > &_B, const bool _transposeB)
Calculates the Matrix Matrix product between two sparse matrices.
double value_t
The type of values to be used by xerus.
void use_sparse_representation(const value_t _eps=std::numeric_limits< value_t >::epsilon())
Converts the Tensor to a sparse representation.
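A sketch of switching between the two representations (values illustrative, xerus.h assumed):

    #include <xerus.h>

    size_t representation_example() {
        xerus::Tensor T = xerus::Tensor::dirac({100, 100}, {7, 7});  // one non-zero out of 10000 entries
        T.use_dense_representation();          // force a dense array
        T.use_sparse_representation(1e-14);    // back to sparse, keeping only entries with |value| >= 1e-14
        return T.sparsity();                   // number of stored entries, here 1
    }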
XERUS_force_inline void write_to_stream(std::ostream &_stream, const T &_value, FileFormat _format)
void calculate_svd(Tensor &_U, Tensor &_S, Tensor &_Vt, Tensor _input, const size_t _splitPos, const size_t _maxRank, const value_t _eps)
Low-Level SVD calculation of a given Tensor _input = _U _S _Vt.
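A usage sketch for calculate_svd; the split position, rank cap and epsilon are illustrative choices (passing ~0ul simply means no practical rank cap here), and xerus.h is assumed:

    #include <xerus.h>

    void svd_example() {
        const xerus::Tensor A = xerus::Tensor::ones({2, 3, 4});
        xerus::Tensor U, S, Vt;
        xerus::calculate_svd(U, S, Vt, A, 2, ~0ul, 0.0);   // split modes {0,1} from {2}: A = U S Vt
        // U: {2, 3, r}, S: {r, r} diagonal, Vt: {r, 4}, with r <= min(2*3, 4).
    }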
void svd(double *const _U, double *const _S, double *const _Vt, const double *const _A, const size_t _m, const size_t _n)
: Performs (U,S,V) = SVD(A)
void stream_writer(std::ostream &_stream, const Tensor &_obj, const FileFormat _format)
pipes all information necessary to restore the current tensor into _stream.
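A round-trip sketch using stream_writer/stream_reader with the TSV format that appears in the source above; the xerus.h header and the exact namespace of FileFormat (assumed here to be xerus) are assumptions:

    #include <xerus.h>
    #include <sstream>

    void io_example() {
        const xerus::Tensor A = xerus::Tensor::ones({2, 2});
        std::stringstream buffer;
        xerus::stream_writer(buffer, A, xerus::FileFormat::TSV);   // serialise to the text format
        xerus::Tensor B;
        xerus::stream_reader(buffer, B, xerus::FileFormat::TSV);   // restore; B should equal A entrywise
    }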
bool approx_entrywise_equal(const xerus::Tensor &_a, const xerus::Tensor &_b, const xerus::value_t _eps=EPSILON)
Checks whether two Tensors are approximately entrywise equal.
bool is_dense() const
Returns whether the current representation is dense.
bool approx_equal(const xerus::Tensor &_a, const xerus::Tensor &_b, const xerus::value_t _eps=EPSILON)
Checks whether two tensors are approximately equal.
void solve_least_squares(Tensor &_X, const Tensor &_A, const Tensor &_B, const size_t _extraDegree=0)
Solves the least squares problem ||_A _X - _B||_F.
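A usage sketch for solve_least_squares; the operator and right-hand side are trivial illustrative choices (xerus.h assumed):

    #include <xerus.h>

    void least_squares_example() {
        const xerus::Tensor A = xerus::Tensor::identity({3, 3});  // degree degM + degN = 2
        const xerus::Tensor b = xerus::Tensor::ones({3});         // degree degM = 1, _extraDegree = 0
        xerus::Tensor x;
        xerus::solve_least_squares(x, A, b, 0);   // minimises ||A x - b||_F; here x simply equals b
    }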
std::map< size_t, value_t > & get_sparse_data()
Returns a reference for direct access to the sparse data map.
XERUS_force_inline void set_factorization_output(Tensor &_lhs, std::unique_ptr< value_t[]> &&_lhsData, Tensor &_rhs, std::unique_ptr< value_t[]> &&_rhsData, const Tensor &_input, const size_t _splitPos, const size_t _rank)
void offset_add(const Tensor &_other, const std::vector< size_t > &_offsets)
Adds the given Tensor with the given offsets to this one.
void copy(T *const __restrict _dest, const T *const __restrict _src, const size_t _n) noexcept
Copies _n entries from _y to _x, where _y and _x must be disjoint memory regions.
static std::tuple< std::map< size_t, double >, std::map< size_t, double >, size_t > qc(const std::map< size_t, double > &_A, const bool _transposeA, size_t _m, size_t _n, bool _fullrank)
calculates the sparse QR decomposition (or qc if fullrank = false)
static XERUS_force_inline std::string to_string(const bool obj)
bool is_sparse() const
Returns whether the current representation is sparse.
void set_zero(T *const __restrict _x, const size_t _n) noexcept
Sets all entries equal to zero.
static void plus_minus_equal(Tensor &_me, const Tensor &_other)
static MultiIndex position_to_multiIndex(size_t _position, const DimensionTuple &_dimensions)
Tensor dense_copy() const
Returns a copy of this Tensor that uses a dense representation.
void add_scaled(T *const __restrict _x, const T _alpha, const T *const __restrict _y, const size_t _n) noexcept
Adds _n entries of _y, scaled by _alpha to the ones of _x. I.e. x += alpha*y.
void remove_slate(const size_t _mode, const size_t _pos)
Removes a single slate from the Tensor, reducing dimension[_mode] by one.
void perform_trace(size_t _firstMode, size_t _secondMode)
Performs the trace over the given modes.
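A usage sketch for perform_trace (illustrative values, xerus.h assumed):

    #include <xerus.h>

    void trace_example() {
        xerus::Tensor T = xerus::Tensor::identity({3, 3});  // a 3x3 identity matrix
        T.perform_trace(0, 1);                              // contract mode 0 with mode 1
        // T is now an order-zero tensor whose single entry is the trace, here 3.0.
    }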
value_t * get_unsanitized_dense_data()
Gives access to the internal data pointer, without any checks.
void modify_entries(const std::function< void(value_t &)> &_f)
Modifies every entry according to the given function.
Tensor & operator-=(const Tensor &_other)
Subtracts the _other Tensor entrywise from this one.
Header file for the TensorNetwork class.
static void solve_sparse_rhs(std::map< size_t, double > &_x, size_t _xDim, const std::map< size_t, double > &_A, const bool _transposeA, const std::map< size_t, double > &_b, size_t _bDim)
solve operator / for sparse right hand sides
Tensor entrywise_product(const Tensor &_A, const Tensor &_B)
calculates the entrywise product of two Tensors
#define XERUS_force_inline
Collection of attributes to force gcc to inline a specific function.
void matrix_matrix_product(double *const _C, const size_t _leftDim, const size_t _rightDim, const double _alpha, const std::map< size_t, double > &_A, const bool _transposeA, const size_t _midDim, const double *const _B, const bool _transposeB)
std::vector< size_t > MultiIndex
: Representation of a MultiIndex, i.e. the tuple of positions for each dimension determining a single p...
value_t * override_dense_data()
Returns a pointer to the internal dense data array for complete rewrite purpose ONLY.
Representation representation
The current representation of the Tensor (i.e. Dense or Sparse)
void use_dense_representation()
Converts the Tensor to a dense representation.
constexpr T pow(const T &_base, const uint32 _exp) noexcept
: Calculates _base^_exp by binary exponentiation
size_t sparsity() const
Returns the number of currently saved entries.
internal::IndexedTensorMoveable< Tensor > operator/(internal::IndexedTensorReadOnly< Tensor > &&_b, internal::IndexedTensorReadOnly< Tensor > &&_A)
Representation
Flags indicating the internal representation of the data of Tensor objects.
void calculate_qc(Tensor &_Q, Tensor &_C, Tensor _input, const size_t _splitPos)
Low-Level QC calculation of a given Tensor _input = _Q _C.
void solve_least_squares(double *const _x, const double *const _A, const size_t _m, const size_t _n, const double *const _b, const size_t _p)
: Solves min ||Ax - b||_2 for x