
Ajay Roy

Memory management system in C/C++
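
The listing below is a single-file C++17 sketch: an HPMemoryManager that combines per-size lock-free arenas with thread-local caching, global allocation tracking with double-free and leak detection, optional bounds checking, C-style malloc/free wrappers, an RAII FileHandle, and detached async tasks that clean up their own thread-local allocations. A SharedObject helper for tearing down cyclic object graphs and a demo main() round out the example.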

#include <atomic>
#include <chrono>
#include <cstddef>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <memory>
#include <mutex>
#include <new>
#include <shared_mutex>
#include <stdexcept>
#include <string>
#include <thread>
#include <unordered_map>
#include <unordered_set>
#include <vector>

// ----------------------
// High-Performance Enterprise Memory Manager
// ----------------------
class HPMemoryManager {
private:
// Global tracking of allocations
std::unordered_set<void*> allocations;
std::unordered_set<void*> persistentAllocations;
mutable std::shared_mutex allocMutex;

// Lock-free / thread-local arenas
struct ArenaBlock {
    std::atomic<ArenaBlock*> next;
    char data[1]; // flexible array
};

struct Arena {
    size_t blockSize;
    std::atomic<ArenaBlock*> freeListHead;
    size_t totalBlocks;
    std::mutex expandMutex; // only for pool expansion
};

std::unordered_map<size_t, std::shared_ptr<Arena>> globalArenas;

// Thread-local arena access
thread_local static std::unordered_map<size_t, std::shared_ptr<Arena>> threadLocalArenas;
thread_local static std::unordered_set<void*> threadLocalAllocations;

bool productionBoundsCheck = true;

public:
HPMemoryManager() = default;

// ----------------------
// Lock-free allocation from arena
// ----------------------
void* allocate(size_t size, bool persistent = false) {
    ArenaBlock* block = nullptr;

    auto& arena = getOrCreateArena(size);

    // Try pop from lock-free free list
    while (true) {
        block = arena->freeListHead.load(std::memory_order_acquire);
        if (!block) break;
        ArenaBlock* next = block->next.load(std::memory_order_relaxed);
        if (arena->freeListHead.compare_exchange_weak(block, next)) {
            registerAllocation(block->data, persistent);
            return block->data;
        }
    }

    // Free list exhausted: expand the pool (only one thread expands at a time),
    // then retry the lock-free fast path.
    {
        std::lock_guard<std::mutex> lock(arena->expandMutex);
        size_t expandCount = 128; // batch allocate
        for (size_t i = 0; i < expandCount; ++i) {
            // Room for the block header (including padding) plus the payload.
            char* raw = new char[sizeof(ArenaBlock) + size];
            auto* newBlock = new (raw) ArenaBlock(); // construct the header in place
            // Push with CAS so concurrent deallocations are not lost.
            ArenaBlock* head = arena->freeListHead.load(std::memory_order_relaxed);
            do {
                newBlock->next.store(head, std::memory_order_relaxed);
            } while (!arena->freeListHead.compare_exchange_weak(
                head, newBlock, std::memory_order_release));
        }
        arena->totalBlocks += expandCount;
    }

    return allocate(size, persistent);
}

void deallocate(void* ptr, size_t size) {
    if (!ptr) return;
    unregisterAllocation(ptr);

    auto& arena = getOrCreateArena(size);
    auto* block = reinterpret_cast<ArenaBlock*>(
        reinterpret_cast<char*>(ptr) - offsetof(ArenaBlock, data)
    );

    // push back lock-free
    ArenaBlock* oldHead = arena->freeListHead.load();
    do {
        block->next.store(oldHead);
    } while (!arena->freeListHead.compare_exchange_weak(oldHead, block));
}

// Typed helpers. These hand out raw memory only: no constructor or destructor
// runs, so they are only safe for trivially constructible/destructible types.
template<typename T>
T* allocateObject(bool persistent = false) {
    return reinterpret_cast<T*>(allocate(sizeof(T), persistent));
}

template<typename T>
void deallocateObject(T*& obj) {
    deallocate(obj, sizeof(T));
    obj = nullptr;
}

// ----------------------
// C-style allocation
// ----------------------
void* mallocAlloc(size_t size, bool persistent = false) {
    void* ptr = std::malloc(size);
    if (!ptr) throw std::bad_alloc();
    registerAllocation(ptr, persistent);
    return ptr;
}

void freeAlloc(void*& ptr, size_t size) {
    if (!ptr) return;
    unregisterAllocation(ptr);
    std::free(ptr);
    ptr = nullptr;
}

private:
// Return the shared arena for this block size, caching it per thread so the
// fast path needs no locking.
std::shared_ptr<Arena>& getOrCreateArena(size_t size) {
    auto cached = threadLocalArenas.find(size);
    if (cached != threadLocalArenas.end())
        return cached->second;

    std::shared_ptr<Arena> arena;
    {
        std::shared_lock<std::shared_mutex> lock(allocMutex);
        auto it = globalArenas.find(size);
        if (it != globalArenas.end())
            arena = it->second;
    }
    if (!arena) {
        // Re-check under the exclusive lock so two threads cannot both
        // create an arena for the same size.
        std::unique_lock<std::shared_mutex> lock(allocMutex);
        auto& slot = globalArenas[size];
        if (!slot) {
            slot = std::make_shared<Arena>();
            slot->blockSize = size;
            slot->freeListHead.store(nullptr);
            slot->totalBlocks = 0;
        }
        arena = slot;
    }

    threadLocalArenas[size] = arena;
    return threadLocalArenas[size];
}

// ----------------------
// Allocation tracking
// ----------------------

public:
void registerAllocation(void* ptr, bool persistent = false) {
    std::unique_lock<std::shared_mutex> lock(allocMutex);
    if (allocations.find(ptr) != allocations.end())
        throw std::runtime_error("Double allocation detected");
    allocations.insert(ptr);
    threadLocalAllocations.insert(ptr);
    if (persistent) persistentAllocations.insert(ptr);
}

void unregisterAllocation(void* ptr) {
    std::unique_lock<std::shared_mutex> lock(allocMutex);
    auto it = allocations.find(ptr);
    if (it == allocations.end())
        throw std::runtime_error("Double free / invalid free");
    allocations.erase(it);
    threadLocalAllocations.erase(ptr);
    persistentAllocations.erase(ptr);
}

void cleanupThreadLocal() {
    // unregisterAllocation also erases from threadLocalAllocations, so walk a
    // snapshot to avoid invalidating the set while iterating. The memory itself
    // is not freed here because its origin (arena vs. malloc) is not recorded.
    auto leftovers = threadLocalAllocations;
    for (auto ptr : leftovers)
        unregisterAllocation(ptr);
    threadLocalAllocations.clear();
}

// ----------------------
// Bounds checking
// ----------------------
template<typename T>
void checkBounds(T* arr, size_t index, size_t size) {
    if (productionBoundsCheck && index >= size)
        throw std::out_of_range("Array index out of bounds");
}

// ----------------------
// RAII for OS / External resources
// ----------------------
class FileHandle {
    std::ofstream* file;
    HPMemoryManager& memMgr;
public:
    FileHandle(HPMemoryManager& mgr, const std::string& filename) : memMgr(mgr) {
        file = new std::ofstream(filename, std::ios::out);
        if (!file->is_open()) throw std::runtime_error("Failed to open file");
        memMgr.registerAllocation(file, true);
    }
    std::ofstream& get() { return *file; }
    ~FileHandle() {
        if (!file) return;
        if (file->is_open()) file->close();
        // The stream was created with new, so untrack it and delete it
        // directly instead of handing it to the arena allocator.
        memMgr.unregisterAllocation(file);
        delete file;
    }
};

// ----------------------
// Async task execution
// ----------------------
template<typename F>
void runAsync(F&& func) {
    std::thread([this, func]() {
        func();
        cleanupThreadLocal();
    }).detach();
}

// ----------------------
// Destructor
// ----------------------
~HPMemoryManager() {
    std::unique_lock<std::shared_mutex> lock(allocMutex);
    // Report anything still registered. The pointers came from the arenas or
    // from std::malloc, so releasing them here with ::operator delete would be
    // a mismatched deallocation; reporting is the safe choice.
    if (!allocations.empty())
        std::cerr << "Memory leaks detected: " << allocations.size() << "\n";
    // Tear down the per-size arenas: every free block was created with
    // new char[], so release it the same way.
    for (auto& [size, arena] : globalArenas) {
        ArenaBlock* block = arena->freeListHead.load();
        while (block) {
            ArenaBlock* next = block->next.load();
            delete[] reinterpret_cast<char*>(block);
            block = next;
        }
    }
}

};
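
// Out-of-class definitions for the thread_local static members declared in
// HPMemoryManager; without these definitions the program will not link.
thread_local std::unordered_map<size_t, std::shared_ptr<HPMemoryManager::Arena>>
    HPMemoryManager::threadLocalArenas;
thread_local std::unordered_set<void*> HPMemoryManager::threadLocalAllocations;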

// ----------------------
// SharedObject for multi-level DAG / cyclic cleanup
// ----------------------
class SharedObject {
private:
std::weak_ptr<SharedObject> weakSelf;
std::vector<std::shared_ptr<SharedObject>> children;

public:
void setSelf(std::shared_ptr<SharedObject> self) { weakSelf = self; }
void addChild(std::shared_ptr<SharedObject> child) { children.push_back(child); }

void cleanupGraph(std::unordered_set<SharedObject*>& visited) {
    if (visited.find(this) != visited.end()) return;
    visited.insert(this);
    for (auto& child : children) {
        if (child) child->cleanupGraph(visited);
    }
    children.clear();
}

};

// ----------------------
// Demo / Testing
// ----------------------
int main() {
HPMemoryManager mem;

try {
    // Single object
    int* a = mem.allocateObject<int>();
    *a = 42;
    std::cout << "Single object: " << *a << "\n";
    mem.deallocateObject(a);

    // Array allocation
    int* arr = reinterpret_cast<int*>(mem.allocate(sizeof(int) * 10));
    for (int i = 0; i < 10; i++) {
        mem.checkBounds(arr, i, 10);
        arr[i] = i * 5;
    }
    for (int i = 0; i < 10; i++) std::cout << arr[i] << " ";
    std::cout << "\n";
    mem.deallocate(arr, sizeof(int) * 10);

    // Persistent memory
    int* persistentInt = mem.allocateObject<int>(true);
    *persistentInt = 999;
    std::cout << "Persistent value: " << *persistentInt << "\n";

    // Async tasks
    mem.runAsync([&mem]() {
        int* tdata = mem.allocateObject<int>();
        *tdata = 777;
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
        mem.deallocateObject(tdata);
    });
    std::this_thread::sleep_for(std::chrono::milliseconds(100));

    // RAII file demo
    {
        HPMemoryManager::FileHandle fh(mem, "highperf_safe.txt");
        fh.get() << "High-performance enterprise memory manager demo\n";
    }

    // DAG / cyclic references
    std::shared_ptr<SharedObject> parent = std::make_shared<SharedObject>();
    parent->setSelf(parent);
    std::shared_ptr<SharedObject> child = std::make_shared<SharedObject>();
    parent->addChild(child);
    std::unordered_set<SharedObject*> visited;
    parent->cleanupGraph(visited);

} catch (const std::exception& e) {
    std::cerr << "Error: " << e.what() << "\n";
}

return 0; // Destructor auto-cleans everything

}
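
The whole listing builds as a single translation unit. std::shared_mutex and the structured bindings used above require C++17, and std::thread needs the platform's thread library, so a command along the lines of g++ -std=c++17 -pthread memory_manager.cpp -o memory_manager (the file name here is just a placeholder) should compile it.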
