void increase_indices(const size_t _i, value_t*& _outPosition, const std::vector<size_t>& _stepSizes, const std::vector<size_t>& _baseDimensions) {
    size_t index = _stepSizes.size()-1;
    _outPosition += _stepSizes[index];
    size_t multStep = _baseDimensions[index];
    while(_i%multStep == 0) {
        _outPosition -= _baseDimensions[index]*_stepSizes[index]; // Reset this index to zero...
        --index;                                                  // ...and carry into the next slower index (this line is elided in the listing).
        _outPosition += _stepSizes[index];
        multStep *= _baseDimensions[index];
    }
}
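The carry logic above is easier to see in isolation. The following standalone sketch (all names are illustrative; nothing in it is xerus API) applies the same odometer-style update to walk the entries of a row-major 2x3 array into the positions of its 3x2 transpose:

#include <cstddef>
#include <iostream>
#include <vector>

// Advance 'offset' by the stride of the fastest index and carry into slower
// indices whenever the running counter i is divisible by the accumulated
// dimension product -- the same update increase_indices performs on a pointer.
void advance(const size_t i, size_t& offset, const std::vector<size_t>& stepSizes, const std::vector<size_t>& dims) {
    size_t index = stepSizes.size()-1;
    offset += stepSizes[index];
    size_t multStep = dims[index];
    while(i%multStep == 0) {
        offset -= dims[index]*stepSizes[index]; // reset this index to zero...
        --index;                                // ...and carry into the next slower one
        offset += stepSizes[index];
        multStep *= dims[index];
    }
}

int main() {
    const std::vector<size_t> dims{2, 3};  // base dimensions (row-major 2x3)
    const std::vector<size_t> steps{1, 2}; // strides of the same indices in the transposed 3x2 layout
    size_t offset = 0;                     // base entry 0 lands at output position 0
    for(size_t i = 1; i < dims[0]*dims[1]; ++i) {
        advance(i, offset, steps, dims);
        std::cout << offset << ' ';        // prints "2 4 1 3 5": base entry i of the 2x3 array
    }                                      // lands at this position of its transpose
    std::cout << '\n';
}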
void reshuffle(Tensor& _out, const Tensor& _base, const std::vector<size_t>& _shuffle) { // (signature from the declaration; the opening lines are elided in the listing)
    for(size_t x = 0; x < _base.degree(); ++x) {
        REQUIRE(_shuffle[x] < _base.degree(), _shuffle[x] << " is no valid new position!");
        REQUIRE(misc::count(_shuffle, x) == 1, x << " illegally appeared twice.");
    }

    // Determine the last index that is actually moved to a new position.
    long lastShuffled = -1;
    for(size_t i = 0; i < _shuffle.size(); ++i) {
        if(_shuffle[i] != i) { lastShuffled = long(i); }
    }

    const size_t numToShuffle = size_t(lastShuffled+1);
    const size_t lastToShuffle = _base.dimensions.size();
    const size_t blockSize = misc::product(_base.dimensions, numToShuffle, lastToShuffle); // Size of the contiguous trailing block that never moves.

    if(blockSize == _base.size) { // Trivial shuffle: nothing moves.
        // ... (body elided in the listing)
    }

    std::vector<size_t> outDimensions(_base.degree());
    for(size_t i = 0; i < _base.degree(); ++i) {
        outDimensions[_shuffle[i]] = _base.dimensions[i];
    }

    // stepSizes[i] is the stride that base index i has within the output.
    std::vector<size_t> stepSizes(numToShuffle);
    for(size_t i = 0; i < numToShuffle; ++i) {
        stepSizes[i] = misc::product(outDimensions, _shuffle[i]+1, numToShuffle)*blockSize;
    }
    // _out may alias _base; in that case the current content of _out is rescued into a temporary first.
    std::unique_ptr<Tensor> rescueSlot;
    // ...
    rescueSlot.reset(new Tensor(std::move(_out)));
    usedBase = rescueSlot.get();
    // ... (the remaining setup of usedBase and _out is elided in the listing)

    const size_t numBlocks = misc::product(usedBase->dimensions, 0, numToShuffle);
    // ... (outPosition and basePosition are initialised from the dense data of _out and usedBase; elided in the listing)

    // Single entries (blockSize == 1):
    *outPosition = *basePosition;
    for(size_t b = 1; b < numBlocks; ++b) {
        // ... (outPosition is advanced via increase_indices; elided in the listing)
        *outPosition = *(basePosition + b*blockSize);
    }

    // Whole blocks:
    misc::copy(outPosition, basePosition, blockSize);
    for(size_t b = 1; b < numBlocks; ++b) {
        // ... (outPosition is advanced via increase_indices; elided in the listing)
        misc::copy(outPosition, basePosition + b*blockSize, blockSize);
    }
    // Sparse case (the setup of baseEntries and outEntries is elided in the listing): the flat position of every entry is recomputed individually.
    for(const auto& entry : baseEntries) {
        size_t basePosition = entry.first;
        size_t position = basePosition%blockSize;
        basePosition /= blockSize;

        for(size_t i = numToShuffle; i > 0; --i) {
            position += (basePosition%usedBase->dimensions[i-1])*stepSizes[i-1];
            basePosition /= usedBase->dimensions[i-1]; // (this line is elided in the listing)
        }

        outEntries.emplace(position, usedBase->factor*entry.second);
    }
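A brief usage sketch of reshuffle itself; Tensor::random is assumed here as a factory for a random dense tensor, while the reshuffle signature is the one declared in this file. _shuffle[i] names the position that index i of _base takes in _out:

xerus::Tensor A = xerus::Tensor::random({2, 3, 4});
xerus::Tensor B;
xerus::reshuffle(B, A, {1, 2, 0}); // index 0 of A becomes index 1 of B, index 1 becomes 2, index 2 becomes 0
// B now has dimensions {4, 2, 3} and entry (i, j, k) of A equals entry (k, i, j) of B.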
inline void increase_indices(const size_t _i, const value_t*& _oldPosition, const std::vector<size_t>& _steps, const std::vector<size_t>& _multDimensions) {
    size_t index = _steps.size()-1;
    _oldPosition += _steps[index];
    size_t multStep = _multDimensions[index];
    while(_i%multStep == 0) {
        _oldPosition -= _multDimensions[index]*_steps[index]; // Reset this index to zero...
        --index;                                              // ...and carry into the next slower index (this line is elided in the listing).
        _oldPosition += _steps[index];
        multStep *= _multDimensions[index];
    }
}
void sum_traces(value_t* const _newPosition,
                const value_t* _oldPosition,
                const std::vector<size_t>& _doubleSteps,
                const std::vector<size_t>& _doubleMultDimensions,
                const size_t _numSummations) {
    *_newPosition = *_oldPosition;
    for(size_t k = 1; k < _numSummations; ++k) {
        increase_indices(k, _oldPosition, _doubleSteps, _doubleMultDimensions); // (this line is elided in the listing)
        *_newPosition += *_oldPosition;
    }
}
void sum_traces(value_t* const _newPosition,
                const value_t* _oldPosition,
                const std::vector<size_t>& _doubleSteps,
                const std::vector<size_t>& _doubleMultDimensions,
                const size_t _numSummations,
                const size_t _orderedIndicesMultDim) {
    misc::copy(_newPosition, _oldPosition, _orderedIndicesMultDim);
    for(size_t k = 1; k < _numSummations; ++k) {
        increase_indices(k, _oldPosition, _doubleSteps, _doubleMultDimensions); // (this line is elided in the listing)
        misc::add(_newPosition, _oldPosition, _orderedIndicesMultDim);
    }
}
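The doubled step sizes have a familiar special case: for an n x n row-major matrix the two traced indices have strides n and 1, so their combined stride n+1 walks along the diagonal; this is exactly the role of the _doubleSteps parameter above. A standalone illustration (not xerus code):

#include <cstddef>

// Sum the n diagonal entries of a row-major n x n matrix by stepping with the
// combined stride n+1 of its two (traced) indices.
double matrix_trace(const double* data, const size_t n) {
    double result = 0.0;
    for(size_t k = 0; k < n; ++k) {
        result += data[k*(n + 1)]; // entry (k, k)
    }
    return result;
}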
size_t get_position(const std::pair<size_t, value_t>& _entry,
                    const size_t* const _baseIndexDim,
                    const size_t* const _divisors,
                    const size_t* const _attributes,
                    const size_t numIndices) {
    size_t position = 0; // (this line is elided in the listing)
    for(size_t i = 0; i < numIndices; ++i) {
        position += ((_entry.first/_divisors[i])%_baseIndexDim[i])*_attributes[i];
    }
    return position; // (this line is elided in the listing)
}
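get_position thus treats the flat position of a sparse entry as a mixed-radix number: each divisor isolates one index digit, which is then weighted with the output stride stored in _attributes. A standalone sketch with a small worked case (names are illustrative, not xerus API):

#include <cstddef>
#include <vector>

// Remap a flat position: extract one digit per index via its divisor and
// dimension, then recombine the digits with the strides of the output layout.
size_t remap(const size_t flat, const std::vector<size_t>& divisors, const std::vector<size_t>& dims, const std::vector<size_t>& outSteps) {
    size_t position = 0;
    for(size_t i = 0; i < dims.size(); ++i) {
        position += ((flat/divisors[i])%dims[i])*outSteps[i];
    }
    return position;
}

// Example: a 2x3 tensor has divisors {3, 1}; its 3x2 transpose has strides {1, 2}.
// remap(4, {3, 1}, {2, 3}, {1, 2}) == 3, i.e. entry (1, 1) of the 2x3 tensor
// lands at flat position 3, which is entry (1, 1) of the transpose.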
bool check_position(size_t& _position,
                    const std::pair<size_t, value_t>& _entry,
                    const size_t* const _baseIndexDim,
                    const size_t* const _divisors,
                    const size_t* const _attributes,
                    const bool* const _fixedFlags,
                    const bool* const _traceFlags,
                    const size_t numIndices) {
    _position = 0; // (this line is elided in the listing)
    for(size_t i = 0; i < numIndices; ++i) {
        const size_t indexPosition = (_entry.first/_divisors[i])%_baseIndexDim[i];

        if(_fixedFlags[i]) { // (condition elided in the listing)
            if(indexPosition != _attributes[i]) {
                return false; // Entry is not at the fixed position (elided in the listing).
            }
        } else if(_traceFlags[i]) {
            if(indexPosition != (_entry.first/_divisors[_attributes[i]])%_baseIndexDim[_attributes[i]]) {
                return false; // Entry is not on the diagonal of the trace (elided in the listing).
            }
        } else {
            _position += indexPosition*_attributes[i];
        }
    }
    return true; // (this line is elided in the listing)
}
std::vector<size_t> get_dimension_vector(const std::vector<Index>& _indices) {
    std::vector<size_t> dimensions;
    dimensions.reserve(_indices.size());
    for(const Index& idx : _indices) {
        dimensions.emplace_back(idx.dimension());
    }
    return dimensions;
}

std::vector<size_t> get_step_sizes(const std::vector<Index>& _indices) {
    std::vector<size_t> stepSizes(_indices.size());
    if(!_indices.empty()) {
        stepSizes.back() = 1;
        for(size_t i = stepSizes.size(); i > 1; --i) {
            stepSizes[i-2] = stepSizes[i-1]*_indices[i-1].dimension();
        }
    }
    return stepSizes;
}
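As a quick worked example, indices with dimensions {2, 3, 4} yield the step sizes {12, 4, 1}: the flat row-major position of entry (i, j, k) is 12*i + 4*j + k, which is the layout assumed throughout this file.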
void evaluate(IndexedTensorWritable<Tensor>&& _out, IndexedTensorReadOnly<Tensor>&& _base) { // (signature from the declaration; the opening lines are elided in the listing)
    _base.assign_indices();
    _out.assign_indices();

    // ...
    _base.assign_index_dimensions();
    _out.assign_index_dimensions();

    #ifndef XERUS_DISABLE_RUNTIME_CHECKS // Perform a complete check that the input is valid.
    for(size_t i = 0; i < _base.indices.size(); ++i) {
        if(_base.indices[i].fixed()) {
            REQUIRE(_base.indices[i].span == 1, "Fixed indices must have span one. Indices are: " << _base.indices << ", total should be " << _base.indices.size() << ". The problem is: " << _base.indices[i] << " -- " << _base.indices[i].fixed());
        } else {
            // Look for the index in _out.
            size_t j = 0; // (this line is elided in the listing)
            while(j < _out.indices.size() && _base.indices[i] != _out.indices[j]) { ++j; }
            if(j < _out.indices.size()) {
                REQUIRE(_base.indices[i].dimension() == _out.indices[j].dimension(), "The index dimensions in the target and base of the evaluation must coincide. Here " << _base.indices[i].dimension() << "!=" << _out.indices[j].dimension() << ". For index " << _base.indices[i] << " == " << _out.indices[j]);
                REQUIRE(_base.indices[i].span == _out.indices[j].span, "The index spans in the target and base of the evaluation must coincide.");
                REQUIRE(_base.indices[i].open(), "Indices appearing in the target of the evaluation must not be part of a trace nor be fixed. Base: " << _base.indices << " Out: " << _out.indices);
            } else {
                // Otherwise the index must be part of a trace within _base.
                j = 0; // (this line is elided in the listing)
                while(j < _base.indices.size() && (i == j || _base.indices[i] != _base.indices[j])) { ++j; }
                REQUIRE(j < _base.indices.size(), "All indices of the evaluation base must either be fixed, appear in the target or be part of a trace. Base: " << _base.indices << " Out: " << _out.indices);
                REQUIRE(misc::count(_base.indices, _base.indices[i]) == 2, "Indices must appear at most twice. Base: " << _base.indices << " Out: " << _out.indices);
                REQUIRE(_base.indices[i].dimension() == _base.indices[j].dimension(), "The dimensions of two traced indices must coincide.");
                REQUIRE(_base.indices[i].span == 1 && _base.indices[j].span == 1, "The spans of traced indices must be one (it is ambiguous what a trace of span-two indices would mean).");
            }
        }
    }

    for(size_t i = 0; i < _out.indices.size(); ++i) {
        REQUIRE(_out.indices[i].open(), "Traces and fixed indices are not allowed in the target of the evaluation. Base: " << _base.indices << " Out: " << _out.indices);
        REQUIRE(misc::count(_base.indices, _out.indices[i]) == 1, "Every index of the target must appear exactly once in the base of the evaluation. Base: " << _base.indices << " Out: " << _out.indices);
    }
    #endif
    // If the indices of _out and _base already coincide, the evaluation reduces to a plain copy.
    if(_base.indices == _out.indices) {
        *_out.tensorObject = *_base.tensorObjectReadOnly;
        return; // (this line is elided in the listing)
    }

    const std::vector<size_t> baseIndexStepSizes(get_step_sizes(_base.indices));
    if(_base.tensorObjectReadOnly->is_dense()) {
        // Count how many of the trailing indices of _base and _out already coincide;
        // together they form a contiguous block that can be copied in one go.
        size_t numOrderedIndices;
        for(numOrderedIndices = 0; numOrderedIndices < _out.indices.size() && _base.indices[_base.indices.size()-1-numOrderedIndices] == _out.indices[_out.indices.size()-1-numOrderedIndices]; ++numOrderedIndices) { }

        const size_t orderedIndexDim = baseIndexStepSizes[_base.indices.size()-numOrderedIndices-1]; // Total dimension of the ordered trailing block.

        std::vector<size_t> stepSizes(_out.indices.size()-numOrderedIndices); // Step size within _base for each (non-ordered) index of _out.

        size_t fixedIndexOffset = 0; // Fixed offset in _base caused by fixed indices.

        std::vector<size_t> traceStepSizes;  // Combined step sizes of the traced index pairs in _base.
        std::vector<size_t> traceDimensions; // Dimensions of the traced index pairs.
        size_t totalTraceDim = 1;            // Total number of summands of all traces.

        // Calculate the attributes of every (non-ordered) index of _base.
        for(size_t i = 0; i < _base.indices.size()-numOrderedIndices; ++i) {
            size_t outPos = 0; // (this line is elided in the listing)
            while(outPos < _out.indices.size() && _base.indices[i] != _out.indices[outPos]) { ++outPos; }

            if(outPos < _out.indices.size()) {
                stepSizes[outPos] = baseIndexStepSizes[i];
            } else if(_base.indices[i].fixed()) {
                fixedIndexOffset += _base.indices[i].fixed_position()*baseIndexStepSizes[i];
            } else { // The index must be part of a trace; look for its partner.
                for(size_t j = i+1; j < _base.indices.size()-numOrderedIndices; ++j) {
                    if(_base.indices[i] == _base.indices[j]) {
                        traceStepSizes.emplace_back(baseIndexStepSizes[i]+baseIndexStepSizes[j]);
                        traceDimensions.emplace_back(_base.indices[i].dimension());
                        totalTraceDim *= _base.indices[i].dimension();
                        // ...
                    }
                }
            }
        }

        // Obtain the data pointers. If _base and _out refer to the same tensor, the old data is kept alive until the evaluation is finished.
        const value_t* oldPosition = _base.tensorObjectReadOnly->get_unsanitized_dense_data()+fixedIndexOffset;
        std::shared_ptr<value_t> delaySlot;
        if(_base.tensorObjectReadOnly == _out.tensorObjectReadOnly) { delaySlot = _out.tensorObject->get_internal_dense_data(); }
        value_t* const newPosition = _out.tensorObject->override_dense_data();

        const size_t numSteps = _out.tensorObject->size/orderedIndexDim;

        // Four variants: single entries vs. whole ordered blocks, each with and without traces.
        if(orderedIndexDim == 1) {
            if(totalTraceDim == 1) {
                newPosition[0] = *oldPosition;
                for(size_t i = 1; i < numSteps; ++i) {
                    // ... (oldPosition is advanced via increase_indices; elided in the listing)
                    newPosition[i] = *oldPosition;
                }
            } else {
                sum_traces(newPosition, oldPosition, traceStepSizes, traceDimensions, totalTraceDim);
                for(size_t i = 1; i < numSteps; ++i) {
                    // ... (oldPosition is advanced via increase_indices; elided in the listing)
                    sum_traces(newPosition+i, oldPosition, traceStepSizes, traceDimensions, totalTraceDim);
                }
            }
        } else {
            if(totalTraceDim == 1) {
                misc::copy(newPosition, oldPosition, orderedIndexDim);
                for(size_t i = 1; i < numSteps; ++i) {
                    // ... (oldPosition is advanced via increase_indices; elided in the listing)
                    misc::copy(newPosition + i*orderedIndexDim, oldPosition, orderedIndexDim);
                }
            } else {
                sum_traces(newPosition, oldPosition, traceStepSizes, traceDimensions, totalTraceDim, orderedIndexDim);
                for(size_t i = 1; i < numSteps; ++i) {
                    // ... (oldPosition is advanced via increase_indices; elided in the listing)
                    sum_traces(newPosition + i*orderedIndexDim, oldPosition, traceStepSizes, traceDimensions, totalTraceDim, orderedIndexDim);
                }
            }
        }

        _out.tensorObject->factor = _base.tensorObjectReadOnly->factor;
    } else if(_base.tensorObjectReadOnly->is_sparse()) {
        #define VLA(T, name) auto name##_store = xerus::misc::make_unique_array(new T); const auto& (name) = name##_store.get();
        VLA(bool[_base.indices.size()],   fixedFlags);
        VLA(bool[_base.indices.size()],   traceFlags);
        VLA(size_t[_base.indices.size()], attributes);
        bool peacefullIndices = true; // True iff every index of _base simply maps to an index of _out (no fixed indices, no traces).

        const std::vector<size_t> outIndexStepSizes(get_step_sizes(_out.indices));

        // Classify every index of _base and store its relevant attribute.
        for(size_t i = 0; i < _base.indices.size(); ++i) {
            if(_base.indices[i].fixed()) {
                fixedFlags[i] = true;
                traceFlags[i] = false;
                attributes[i] = _base.indices[i].fixed_position();
                peacefullIndices = false;
            } else {
                // Look for the index in _out.
                size_t outPos = 0; // (this line is elided in the listing)
                while(outPos < _out.indices.size() && _out.indices[outPos] != _base.indices[i]) { ++outPos; }
                if(outPos < _out.indices.size()) {
                    fixedFlags[i] = false;
                    traceFlags[i] = false;
                    attributes[i] = outIndexStepSizes[outPos];
                } else { // The index is part of a trace; its attribute is the position of its partner.
                    fixedFlags[i] = false;
                    traceFlags[i] = true;
                    peacefullIndices = false;
                    for(attributes[i] = 0; _base.indices[i] != _base.indices[attributes[i]] || attributes[i] == i; ++attributes[i]) { }
                }
            }
        }
        const std::map<size_t, value_t>& baseEntries = _base.tensorObjectReadOnly->get_unsanitized_sparse_data();
        std::shared_ptr<std::map<size_t, value_t>> delaySlot;
        if(_base.tensorObjectReadOnly == _out.tensorObjectReadOnly) { delaySlot = _out.tensorObject->get_internal_sparse_data(); }
        std::map<size_t, value_t>& outEntries = _out.tensorObject->override_sparse_data();

        const value_t factor = _base.tensorObjectReadOnly->factor;
        const std::vector<size_t> baseIndexDimensions(get_dimension_vector(_base.indices)); // (declaration elided in the listing)

        if(peacefullIndices) {
            // Every index is open: each entry survives, only its flat position changes.
            for(const auto& entry : baseEntries) {
                outEntries.emplace(get_position(entry, baseIndexDimensions.data(), baseIndexStepSizes.data(), attributes, _base.indices.size()), factor*entry.second);
            }
        } else {
            // Fixed indices and traces may discard entries or merge several of them into one.
            size_t newPosition; // (declaration elided in the listing)
            for(const auto& entry : baseEntries) {
                if(check_position(newPosition, entry, baseIndexDimensions.data(), baseIndexStepSizes.data(), attributes, fixedFlags, traceFlags, _base.indices.size())) {
                    outEntries[newPosition] += factor*entry.second;
                }
            }
        }
    }
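For orientation, evaluate() is what runs when an indexed expression consisting of a single tensor is assigned to another indexed tensor, i.e. pure reordering, fixed indices and traces, but no contractions between different tensors. A hedged usage sketch; Tensor::random and the indexed assignment syntax are assumed from the public xerus API:

xerus::Index i, j, k;
xerus::Tensor A = xerus::Tensor::random({3, 4, 3});

xerus::Tensor T;
T(j) = A(i, j, i);       // partial trace over the first and third index of A

xerus::Tensor R;
R(k, j, i) = A(i, j, k); // pure reordering of the indices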