#include <xerus/misc/allocator.h> // data structures used by the custom new and delete operators
#include <cstdint> // assumed, for uint8_t/uintptr_t
#include <cstdlib> // assumed, for malloc/free/size_t

#ifdef XERUS_REPLACE_ALLOCATOR

using xma = xerus::misc::AllocatorStorage;
namespace xm = xerus::misc; // assumed alias; the definitions below refer to xm::astore

thread_local xerus::misc::AllocatorStorage xm::astore;
thread_local bool programIsRunning = true;
xerus::misc::AllocatorStorage::AllocatorStorage() {
	for (unsigned long i=0; i<xma::NUM_BUCKETS; ++i) {
		// Assumed loop body: zero the per-bucket statistics updated further below.
		allocCount[i] = 0;
		currAlloc[i] = 0;
		maxAlloc[i] = 0;
	}
}

xerus::misc::AllocatorStorage::~AllocatorStorage(){ programIsRunning=false; }
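// The destructor only flips programIsRunning: once a thread's storage is being
// torn down, mydelete (below) must stop pushing freed chunks into its buckets.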
using mallocType = void *(*)(size_t);
void* (*xerus::misc::r_malloc)(size_t) = &malloc;
using freeType = void (*)(void*);
void (*xerus::misc::r_free)(void*) = &free;
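// r_malloc and r_free are the function pointers through which the pools and all
// non-cached (large) allocations reach the underlying allocator; by default they
// simply point at the C library's malloc and free.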
void xerus::misc::AllocatorStorage::create_new_pool() {
	uint8_t* newPool = static_cast<uint8_t*>(xerus::misc::r_malloc(xma::POOL_SIZE));
	uint8_t* startAddr = newPool+xma::BUCKET_SIZE - reinterpret_cast<uintptr_t>(newPool)%xma::ALIGNMENT;
	pools.emplace_back(newPool, startAddr);
}
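// Each pool is one POOL_SIZE block obtained via r_malloc. The first chunk is handed
// out at newPool+BUCKET_SIZE minus the block's misalignment, which keeps chunk starts
// ALIGNMENT-aligned (assuming BUCKET_SIZE is a multiple of ALIGNMENT) and leaves room
// below the first chunk for its tag byte.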
void *myalloc(size_t n) {
	if (n >= xma::SMALLEST_NOT_CACHED_SIZE) {
		// Large request: bypass the buckets and over-allocate by ALIGNMENT+1 bytes
		// to leave room for the alignment shift and the two tag bytes.
		void *res = xerus::misc::r_malloc(n+xma::ALIGNMENT+1);
		res = static_cast<void*>(static_cast<uint8_t*>(res)+xma::ALIGNMENT+1);
		uintptr_t alignmentOffset = reinterpret_cast<uintptr_t>(res)%xma::ALIGNMENT;
		res = static_cast<void*>(static_cast<uint8_t*>(res) - alignmentOffset);

		// Tag byte 0xFF marks a non-cached allocation for mydelete ...
		*(static_cast<uint8_t*>(res)-1) = 0xFF;
		// ... and the stored alignment offset lets it recover the raw pointer.
		*(static_cast<uint8_t*>(res)-2) = static_cast<uint8_t>(alignmentOffset);
		return res;
	} else {
		// Small request: serve it from the per-thread bucket of cached chunks.
		uint8_t numBucket = uint8_t( (n+1)/xma::BUCKET_SIZE );
		uint8_t* res;
		if (xm::astore.buckets[numBucket].empty()) {
			// No recycled chunk of this size class: carve a fresh one from the
			// current pool, opening a new pool if the remaining space is too small.
			if (xm::astore.pools.empty() || xm::astore.pools.back().second + (numBucket+1)*xma::BUCKET_SIZE >= xm::astore.pools.back().first + xma::POOL_SIZE) {
				xm::astore.create_new_pool();
			}
			res = xm::astore.pools.back().second;
			xm::astore.pools.back().second += (numBucket+1)*xma::BUCKET_SIZE;
			*(res-1) = numBucket; // the byte below the chunk carries its bucket index, read back by mydelete
		} else {
			// Reuse the most recently freed chunk of this size class.
			res = xm::astore.buckets[numBucket].back();
			xm::astore.buckets[numBucket].pop_back();
		}
		#ifdef XERUS_PERFORMANCE_ANALYSIS
			xm::astore.allocCount[numBucket] += 1;
			xm::astore.currAlloc[numBucket] += 1;
			if (xm::astore.currAlloc[numBucket] > xm::astore.maxAlloc[numBucket]) {
				xm::astore.maxAlloc[numBucket] = xm::astore.currAlloc[numBucket];
			}
		#endif
		return static_cast<void*>(res);
	}
}

#endif // XERUS_REPLACE_ALLOCATOR
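// The replaced operators below rely on the tag bytes written by myalloc: ptr[-1]
// holds the bucket index for pool chunks or 0xFF for large allocations, and for the
// latter ptr[-2] additionally stores the alignment offset needed to reconstruct the
// raw r_malloc pointer.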

#ifdef XERUS_REPLACE_ALLOCATOR

// Replaced global allocation operators. Forwarding to myalloc is assumed here,
// mirroring the operator delete overloads below.
void* operator new(std::size_t n) {
	return myalloc(n);
}

void *operator new[](std::size_t s) {
	return myalloc(s);
}
void mydelete(void *ptr) noexcept {
	// The byte below the pointer records how it was allocated (see myalloc).
	uint8_t n = *(static_cast<uint8_t*>(ptr)-1);
	if (n != 0xFF) { // bucket chunk; large allocations are tagged 0xFF
		#ifdef XERUS_PERFORMANCE_ANALYSIS
			xm::astore.currAlloc[n] -= 1;
		#endif
		if (programIsRunning) {
			xm::astore.buckets[n].push_back(static_cast<uint8_t*>(ptr));
		}
	} else {
		// Large allocation: undo the shift applied in myalloc and release it.
		uint8_t offset = *(static_cast<uint8_t*>(ptr)-2);
		xerus::misc::r_free(static_cast<void*>(static_cast<uint8_t*>(ptr)-xma::ALIGNMENT-1+offset));
	}
}
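// Consistency of the large-allocation path: myalloc returns raw+ALIGNMENT+1-alignmentOffset,
// so ptr-ALIGNMENT-1+offset in the branch above is exactly the raw pointer that r_malloc
// produced, which is what r_free expects.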

void operator delete(void* ptr) noexcept {
	mydelete(ptr);
}

void operator delete[](void* ptr) noexcept {
	mydelete(ptr);
}

#endif // XERUS_REPLACE_ALLOCATOR