56 void contract(Tensor& _result,
const Tensor& _lhs,
const bool _lhsTrans,
const Tensor& _rhs,
const bool _rhsTrans,
const size_t _numModes);
57 Tensor
contract(
const Tensor& _lhs,
const bool _lhsTrans,
const Tensor& _rhs,
const bool _rhsTrans,
const size_t _numModes);
65 void reshuffle(Tensor& _out,
const Tensor& _base,
const std::vector<size_t>& _shuffle);
66 Tensor
reshuffle(
const Tensor& _base,
const std::vector<size_t>& _shuffle);
116 std::shared_ptr<value_t> denseData;
123 std::shared_ptr<std::map<size_t, value_t>> sparseData;
153 template<XERUS_ADD_MOVE(Vec, DimensionTuple), XERUS_ADD_MOVE(SPtr, std::shared_ptr<value_t>)>
154 explicit Tensor(Vec&& _dimensions, SPtr&& _data)
155 : dimensions(
std::forward<Vec>(_dimensions)), size(misc::product(dimensions)), representation(
Representation::Dense), denseData(
std::forward<SPtr>(_data)) { }
163 explicit Tensor(DimensionTuple _dimensions, std::unique_ptr<
value_t[]>&& _data);
173 explicit Tensor(DimensionTuple _dimensions,
const std::function<
value_t()>& _f);
182 explicit Tensor(DimensionTuple _dimensions,
const std::function<
value_t(
const size_t)>& _f);
191 explicit Tensor(DimensionTuple _dimensions,
const std::function<
value_t(
const MultiIndex&)>& _f);
202 Tensor(DimensionTuple _dimensions,
const size_t _N,
const std::function<std::pair<size_t, value_t>(
size_t,
size_t)>& _f);
212 template<
class distribution=std::normal_distribution<value_t>,
class generator=std::mt19937_64>
215 value_t*
const dataPtr = result.denseData.get();
216 for(
size_t i = 0; i < result.
size; ++i) {
217 dataPtr[i] = _dist(_rnd);
227 template<
class distribution=std::normal_distribution<value_t>,
class generator=std::mt19937_64>
240 template<
class generator=std::mt19937_64>
242 std::vector<size_t> dimensions = _dimensions1;
243 dimensions.insert(dimensions.end(), _dimensions2.begin(), _dimensions2.end());
244 const size_t m = misc::product(_dimensions1);
245 const size_t n = misc::product(_dimensions2);
246 const size_t max = std::max(m,n);
247 const size_t min = std::min(m,n);
250 typename generator::result_type randomness = 0;
251 const size_t restart = size_t(std::log2(generator::max()));
252 for (
size_t i=0; i<min; ++i) {
253 auto idx = i%restart;
257 if (randomness & (1<<idx)) {
264 for (
size_t i=0; i<min-1; ++i) {
273 contract(result, p,
false, result,
false, 1);
279 result.reinterpret_dimensions(std::move(dimensions));
293 template<
class distribution=std::normal_distribution<value_t>,
class generator=std::mt19937_64>
296 XERUS_REQUIRE(_N <= result.
size,
" Cannot create " << _N <<
" non zero entries in a tensor with only " << result.
size <<
" total entries!");
298 std::uniform_int_distribution<size_t> entryDist(0, result.
size-1);
299 while(result.sparseData->size() < _N) {
300 result.sparseData->emplace(entryDist(_rnd), _dist(_rnd));
310 template<
class distribution=std::normal_distribution<value_t>,
class generator=std::mt19937_64>
612 template<
typename... args>
623 template<
typename... args>
692 void reset(DimensionTuple _newDim,
const std::shared_ptr<value_t>& _newData);
700 void reset(DimensionTuple _newDim, std::unique_ptr<
value_t[]>&& _newData);
706 void reset(DimensionTuple _newDim, std::map<size_t, value_t>&& _newData);
727 void resize_mode(
const size_t _mode,
const size_t _newDim,
size_t _cutPos=~0ul);
735 void fix_mode(
const size_t _mode,
const size_t _slatePosition);
743 void remove_slate(
const size_t _mode,
const size_t _pos);
840 static void add_sparse_to_full(
const std::shared_ptr<value_t>& _denseData,
const value_t _factor,
const std::shared_ptr<
const std::map<size_t, value_t>>& _sparseData);
843 static void add_sparse_to_sparse(
const std::shared_ptr<std::map<size_t, value_t>>& _sum,
const value_t _factor,
const std::shared_ptr<
const std::map<size_t, value_t>>& _summand);
870 contract(_result, _lhs,
false, _rhs,
false, _numModes);
874 return contract(_lhs,
false, _rhs,
false, _numModes);
Initialisation
Flags determining the initialisation of the data of Tensor objects.
void use_dense_representation_if_desirable()
Converts the Tensor to a dense representation if sparsity * sparsityFactor >= size.
Tensor sparse_copy() const
Returns a copy of this Tensor that uses a sparse representation.
value_t frob_norm() const
Calculates the Frobenius norm of the tensor.
value_t factor
Single value representing a constant scaling factor.
static size_t sparsityFactor
size_t degree() const
Returns the degree of the tensor.
void ensure_own_data()
Ensures that this tensor is the sole owner of its data. If needed new space is allocated and all entr...
void pseudo_inverse(Tensor &_inverse, const Tensor &_input, const size_t _splitPos)
Low-Level calculation of the pseudo inverse of a given Tensor.
static void add_sparse_to_sparse(const std::shared_ptr< std::map< size_t, value_t >> &_sum, const value_t _factor, const std::shared_ptr< const std::map< size_t, value_t >> &_summand)
Adds the given sparse data to the given sparse data.
void reshuffle(Tensor &_out, const Tensor &_base, const std::vector< size_t > &_shuffle)
: Performs a simple reshuffle. Much less powerful than a full evaluate, but more efficient...
static Tensor XERUS_warn_unused identity(DimensionTuple _dimensions)
: Returns a Tensor representation of the identity operator with the given dimensions.
static Tensor XERUS_warn_unused random_orthogonal(DimensionTuple _dimensions1, DimensionTuple _dimensions2, generator &_rnd=xerus::misc::randomEngine)
Constructs a dense Tensor with the given dimensions and uses the given random generator and distribut...
Header file for the standard container support functions.
std::string to_string() const
Creates a string representation of the Tensor.
static Tensor XERUS_warn_unused random(DimensionTuple _dimensions, distribution &_dist=xerus::misc::defaultNormalDistribution, generator &_rnd=xerus::misc::randomEngine)
Constructs a dense Tensor with the given dimensions and uses the given random generator and distribut...
Very general class used to represent arbitrary tensor networks.
size_t size
Size of the Tensor – always equal to the product of the dimensions.
Internal representation of a readable indexed Tensor or TensorNetwork.
Tensor & operator=(const Tensor &_other)=default
Standard assignment operator.
void stream_reader(std::istream &_stream, Tensor &_obj, const FileFormat _format)
tries to restore the tensor from a stream of data.
value_t one_norm() const
Calculates the 1-norm of the tensor.
DimensionTuple dimensions
Vector containing the individual dimensions of the tensor.
void calculate_rq(Tensor &_R, Tensor &_Q, Tensor _input, const size_t _splitPos)
Low-Level RQ calculation of a given Tensor _input = _R _Q.
void calculate_qr(Tensor &_Q, Tensor &_R, Tensor _input, const size_t _splitPos)
Low-Level QR calculation of a given Tensor _input = _Q _R.
Tensor(const Representation _representation=Representation::Sparse)
Constructs an order zero Tensor with the given initial representation.
The main namespace of xerus.
Class that handles simple (non-decomposed) tensors in a dense or sparse representation.
static XERUS_force_inline Tensor XERUS_warn_unused random(std::initializer_list< size_t > &&_dimensions, distribution &_dist=xerus::misc::defaultNormalDistribution, generator &_rnd=xerus::misc::randomEngine)
Constructs a dense Tensor with the given dimensions and uses the given random generator and distribut...
value_t * get_dense_data()
Returns a pointer for direct access to the dense data array in row major order.
void resize_mode(const size_t _mode, const size_t _newDim, size_t _cutPos=~0ul)
Resizes a specific mode of the Tensor.
value_t cat(const size_t _position) const
Unsanitized read access to a single entry.
value_t & operator[](const size_t _position)
Read/Write access a single entry.
void fix_mode(const size_t _mode, const size_t _slatePosition)
Fixes a specific mode to a specific value, effectively reducing the order by one. ...
thread_local std::normal_distribution< double > defaultNormalDistribution
Header file for templates to store and restore objects from / to files / streams. ...
std::map< size_t, value_t > & get_unsanitized_sparse_data()
Gives access to the internal sparse map, without any checks.
internal::IndexedTensor< Tensor > operator()(args... _args)
Indexes the Tensor for read/write use.
Tensor(Vec &&_dimensions, SPtr &&_data)
: Creates a new (dense) tensor with the given dimensions, using a provided data.
const std::shared_ptr< value_t > & get_internal_dense_data()
Gives access to the internal shared data pointer, without any checks.
void ensure_own_data_and_apply_factor()
Checks whether there is a non-trivial factor and applies it. Even if no factor is applied ensure_own_...
Tensor operator*(const value_t _factor, Tensor _tensor)
Calculates the entrywise multiplication of the Tensor _tensor with the constant _factor.
Tensor & operator/=(const value_t _divisor)
Performs the entrywise division by a constant _divisor.
static Tensor XERUS_warn_unused random(DimensionTuple _dimensions, const size_t _N, distribution &_dist=xerus::misc::defaultNormalDistribution, generator &_rnd=xerus::misc::randomEngine)
Constructs a random sparse Tensor with the given dimensions.
std::vector< size_t > DimensionTuple
: Representation of the dimensions of a Tensor.
void ensure_own_data_no_copy()
Ensures that this tensor is the sole owner of its data space. If needed new space is allocated with e...
void apply_factor()
Checks whether there is a non-trivial scaling factor and applies it if necessary. ...
void modify_diagonal_entries(const std::function< void(value_t &)> &_f)
Modifies the diagonal entries according to the given function.
static XERUS_force_inline Tensor XERUS_warn_unused random(std::initializer_list< size_t > &&_dimensions, const size_t _N, distribution &_dist, generator &_rnd)
Constructs a random sparse Tensor with the given dimensions.
static Tensor XERUS_warn_unused ones(DimensionTuple _dimensions)
: Returns a Tensor with all entries equal to one.
std::ostream & operator<<(std::ostream &_out, const xerus::Index &_idx)
Allows to pretty print indices, giving the valueId and span.
void reinterpret_dimensions(DimensionTuple _newDimensions)
Reinterprets the dimensions of the tensor.
void solve(internal::IndexedTensorWritable< Tensor > &&_x, internal::IndexedTensorReadOnly< Tensor > &&_A, internal::IndexedTensorReadOnly< Tensor > &&_b)
static Tensor XERUS_warn_unused kronecker(DimensionTuple _dimensions)
: Returns a Tensor representation of the kronecker delta.
size_t reorder_cost() const
Approximates the cost to reorder the tensor.
#define XERUS_warn_unused
Internal representation of a readable and writable indexed Tensor or TensorNetwork.
void calculate_cq(Tensor &_C, Tensor &_Q, Tensor _input, const size_t _splitPos)
Low-Level CQ calculation of a given Tensor _input = _C _Q.
static Tensor XERUS_warn_unused dirac(DimensionTuple _dimensions, const MultiIndex &_position)
: Returns a Tensor with a single entry equals one and all other zero.
void contract(Tensor &_result, const Tensor &_lhs, const bool _lhsTrans, const Tensor &_rhs, const bool _rhsTrans, const size_t _numModes)
Low-level contraction between Tensors.
void reset()
Resets the tensor as if default initialized.
static void add_sparse_to_full(const std::shared_ptr< value_t > &_denseData, const value_t _factor, const std::shared_ptr< const std::map< size_t, value_t >> &_sparseData)
Adds the given sparse data to the given full data.
FileFormat
possible file formats for tensor storage
Tensor & operator*=(const value_t _factor)
Performs the entrywise multiplication with a constant _factor.
size_t count_non_zero_entries(const value_t _eps=std::numeric_limits< value_t >::epsilon()) const
Determines the number of non-zero entries.
bool has_factor() const
Checks whether the tensor has a non-trivial global scaling factor.
Tensor operator-(Tensor _lhs, const Tensor &_rhs)
Calculates the entrywise difference between _lhs and _rhs.
Tensor & operator+=(const Tensor &_other)
Adds the _other Tensor entrywise to this one.
static size_t multiIndex_to_position(const MultiIndex &_multiIndex, const DimensionTuple &_dimensions)
#define XERUS_REQUIRE(condition, message)
Checks whether condition is true. logs an error otherwise via XERUS_LOG(error, message).
constexpr const value_t EPSILON
The default epsilon value in xerus.
Tensor operator+(Tensor _lhs, const Tensor &_rhs)
Calculates the entrywise sum of _lhs and _rhs.
std::map< size_t, value_t > & override_sparse_data()
Returns a pointer to the internal sparse data map for complete rewrite purpose ONLY.
bool all_entries_valid() const
Checks the tensor for illegal entries, e.g. nan, inf,...
const std::shared_ptr< std::map< size_t, value_t > > & get_internal_sparse_data()
Gives access to the internal shared sparse data pointer, without any checks.
value_t & at(const size_t _position)
Unsanitized access to a single entry.
Header file for shorthand notations that are xerus specific but used throughout the library...
double value_t
The type of values to be used by xerus.
void use_sparse_representation(const value_t _eps=std::numeric_limits< value_t >::epsilon())
Converts the Tensor to a sparse representation.
void calculate_svd(Tensor &_U, Tensor &_S, Tensor &_Vt, Tensor _input, const size_t _splitPos, const size_t _maxRank, const value_t _eps)
Low-Level SVD calculation of a given Tensor _input = _U _S _Vt.
void stream_writer(std::ostream &_stream, const Tensor &_obj, const FileFormat _format)
pipes all information necessary to restore the current tensor into _stream.
bool approx_entrywise_equal(const xerus::Tensor &_a, const xerus::Tensor &_b, const xerus::value_t _eps=EPSILON)
Checks whether two Tensors are approximately entrywise equal.
bool is_dense() const
Returns whether the current representation is dense.
bool approx_equal(const xerus::Tensor &_a, const xerus::Tensor &_b, const xerus::value_t _eps=EPSILON)
Checks whether two tensors are approximately equal.
void solve_least_squares(Tensor &_X, const Tensor &_A, const Tensor &_B, const size_t _extraDegree=0)
Solves the least squares problem ||_A _X - _B||_F.
std::map< size_t, value_t > & get_sparse_data()
Returns a reference for direct access to the sparse data map.
internal::IndexedTensorReadOnly< Tensor > operator()(args... _args) const
Indexes the Tensor for read only use.
void offset_add(const Tensor &_other, const std::vector< size_t > &_offsets)
Adds the given Tensor with the given offsets to this one.
bool is_sparse() const
Returns whether the current representation is sparse.
static void plus_minus_equal(Tensor &_me, const Tensor &_other)
thread_local std::mt19937_64 randomEngine
static MultiIndex position_to_multiIndex(size_t _position, const DimensionTuple &_dimensions)
Tensor dense_copy() const
Returns a copy of this Tensor that uses a dense representation.
void remove_slate(const size_t _mode, const size_t _pos)
Removes a single slate from the Tensor, reducing dimension[_mode] by one.
void perform_trace(size_t _firstMode, size_t _secondMode)
Performs the trace over the given modes.
value_t * get_unsanitized_dense_data()
Gives access to the internal data pointer, without any checks.
Header file for some additional math functions.
void modify_entries(const std::function< void(value_t &)> &_f)
Modifies every entry according to the given function.
Tensor & operator-=(const Tensor &_other)
Subtracts the _other Tensor entrywise from this one.
Tensor entrywise_product(const Tensor &_A, const Tensor &_B)
calculates the entrywise product of two Tensors
#define XERUS_force_inline
Collection of attributes to force gcc to inline a specific function.
std::vector< size_t > MultiIndex
: Representation of a MultiIndex, i.e. the tuple of positions for each dimension determining a single p...
value_t * override_dense_data()
Returns a pointer to the internal dense data array for complete rewrite purpose ONLY.
Representation representation
The current representation of the Tensor (i.e Dense or Sparse)
void use_dense_representation()
Converts the Tensor to a dense representation.
size_t sparsity() const
Returns the number currently saved entries.
internal::IndexedTensorMoveable< Tensor > operator/(internal::IndexedTensorReadOnly< Tensor > &&_b, internal::IndexedTensorReadOnly< Tensor > &&_A)
Header file for the IndexedTensor class.
Representation
Flags indicating the internal representation of the data of Tensor objects.
void calculate_qc(Tensor &_Q, Tensor &_C, Tensor _input, const size_t _splitPos)
Low-Level QC calculation of a given Tensor _input = _Q _C.