/***
    Copyright (C) 2021 J Reece Wilson (a/k/a "Reece"). All rights reserved.

    File: Heap.cpp
    Date: 2021-6-12
    Author: Reece
***/
#include <Source/RuntimeInternal.hpp>
#include "Memory.hpp"
#include "Heap.hpp"

#include "mimalloc.h"
#include "o1heap.hpp"

#if defined(AURORA_IS_POSIX_DERIVED)
#include <sys/mman.h>
#endif

namespace Aurora::Memory
{
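    // Rounds a byte count up to the next whole page boundary. Assumes the page
    // size reported by HWInfo::GetPageSize() is a power of two, so the mask trick
    // below is valid (e.g., with 4 KiB pages, 1 -> 4096 and 4096 -> 4096).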
    static AuUInt32 RoundPageUp(AuUInt32 value)
    {
        auto pageMask = HWInfo::GetPageSize() - 1;
        return (value + pageMask) & ~(pageMask);
    }

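    // Reserves and commits a page-aligned, read/write region for a heap's backing
    // store: VirtualAlloc on NT, anonymous private mmap on POSIX, and a mimalloc
    // fallback elsewhere.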
    static void *HeapLargeAllocate(AuUInt length)
    {
        length = RoundPageUp(length);
#if defined(AURORA_IS_MODERNNT_DERIVED)
        return VirtualAlloc(nullptr, length, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#elif defined(AURORA_IS_POSIX_DERIVED)
        // mmap reports failure with MAP_FAILED rather than nullptr; normalize so
        // callers can rely on a simple null check.
        auto ret = mmap(nullptr, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return ret == MAP_FAILED ? nullptr : ret;
#else
        // Ideally we should page-align here as well.
        // mimalloc seems to have fast paths (with warnings) for overly large
        // pass-through allocations, though this is unverified. 32-byte alignment via
        // the fastest path mimalloc can provide us seems adequate. Very large
        // allocations can stress mimalloc, but it has slow paths to handle them.
        return AuMemory::FAlloc<void *>(length, 32);
#endif
    }

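    // Releases a region obtained from HeapLargeAllocate. On POSIX, munmap requires
    // the mapping's length, so the size is rounded up exactly as it was at
    // allocation time.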
    static void HeapLargeFree(void *buffer, AuUInt length)
    {
        length = RoundPageUp(length);
#if defined(AURORA_IS_MODERNNT_DERIVED)
        VirtualFree(buffer, 0, MEM_RELEASE);
#elif defined(AURORA_IS_POSIX_DERIVED)
        munmap(buffer, length);
#else
        AuMemory::Free(buffer);
        mi_collect(false);
#endif
    }

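    // An o1heap-backed Heap implementation. The heap lives inside one large
    // page-backed region (either allocated here or provided by the caller), guards
    // o1heap with a mutex via the init hooks, and tracks live allocations with an
    // atomic counter so a terminated heap can linger until its last block is freed.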
    struct InternalHeap : BaseHeap, AuEnableSharedFromThis<InternalHeap>
    {
        virtual AuSPtr<Heap> AllocateDivision(AuUInt32 length, AuUInt32 alignment) override;

        InternalHeap() : mutex_(nullptr), heap_(nullptr), count_(0)
        { }

        virtual ~InternalHeap();

        bool ownsMemory_ {};

        bool Init(AuUInt length, void *ptr = nullptr);

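        // Mirrors the layout of o1heap's internal fragment header so the size of an
        // allocation can be read back from the bytes immediately preceding it. This
        // depends on o1heap implementation details and must be kept in sync with
        // the bundled o1heap.hpp.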
        typedef struct FragmentHeader
        {
            void *next;
            void *prev;
            size_t size;
            bool used;
        } FragmentHeader;

        static AuUInt GetHeapSize(const void *ptr)
        {
            return reinterpret_cast<const FragmentHeader *>(ptr)[-1].size;
        }

        Types::size_t GetChunkSize(const void *head) override;

        void *_FAlloc(Types::size_t length) override;
        void *_FAlloc(Types::size_t length, Types::size_t align) override;
        void *_ZAlloc(Types::size_t length) override;
        void *_ZAlloc(Types::size_t length, Types::size_t align) override;
        void *_ZRealloc(void *buffer, Types::size_t length) override;
        void *_ZRealloc(void *buffer, Types::size_t length, Types::size_t align) override;
        void *_FRealloc(void *buffer, Types::size_t length) override;
        void *_FRealloc(void *buffer, Types::size_t length, Types::size_t align) override;
        void _Free(void *buffer) override;
        AuSPtr<Heap> GetSelfReference() override;

        void TryRelease();
        void DecrementUsers();
        void RequestTermination();

        void UpdateStats() override;

    private:
        AuThreadPrimitives::MutexUnique_t mutex_;

        O1HeapInstance *heap_ {};
        int count_ {};
        bool isDangling_ {};
    };

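    // A child heap carved out of a parent heap's address space: the backing block
    // is allocated from the parent and returned to it when this heap is destroyed.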
    struct DeletableHeap : InternalHeap
    {
        Heap *parent {};
        void *ptr2_ {};

        DeletableHeap(Heap *parent, void *ptr);
        ~DeletableHeap();
    };

    DeletableHeap::DeletableHeap(Heap *parent, void *ptr) : parent(parent), ptr2_(ptr)
    { }

    DeletableHeap::~DeletableHeap()
    {
        if (this->ptr2_)
        {
            parent->Free(this->ptr2_);
        }
    }

    InternalHeap::~InternalHeap()
    {
        SysAssertDbgExp(count_ == 0);

        if (this->base_)
        {
            if (this->heap_)
            {
                o1HeapReleaseCpp(this->heap_); // ->~O1HeapInstance(); // TODO: make a free function
                this->heap_ = nullptr;
            }

            if (this->ownsMemory_)
            {
                HeapLargeFree(this->base_, this->length_);
                this->base_ = nullptr;
            }
        }

        this->mutex_.reset();
    }

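    // Initializes the heap over a region of `length` bytes. When `ptr` is provided,
    // the caller retains ownership of the memory; otherwise a fresh page-backed
    // region is allocated and owned by this object. The lambdas passed to
    // o1heapInit serialize o1heap's critical sections against mutex_.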
    bool InternalHeap::Init(AuUInt length, void *ptr)
    {
        SysAssert(!this->base_, "heap already initialized");
        SysAssert(!this->mutex_, "heap already initialized");

        SysAssert(length, "invalid heap allocation");
        this->length_ = length;

        this->mutex_ = AuThreadPrimitives::MutexUnique();
        if (!this->mutex_) return false;

        if (ptr)
        {
            this->base_ = ptr;
            this->ownsMemory_ = false;
        }
        else
        {
            this->base_ = HeapLargeAllocate(length);
            if (!base_) return false;

            this->ownsMemory_ = true;
        }

        this->heap_ = o1heapInit(this->base_, length,
            [this](const O1HeapInstance *const handle) -> void
            {
                SysAssertDbg(this->mutex_, "missing mutex");
                this->mutex_->Lock();
            },
            [this](const O1HeapInstance *const handle) -> void
            {
                SysAssertDbg(this->mutex_, "missing mutex");
                this->mutex_->Unlock();
            }
        );

        // o1heapInit can fail (e.g., the region is too small for its bookkeeping);
        // on failure the destructor still releases any owned memory.
        return this->heap_ != nullptr;
    }

    Types::size_t InternalHeap::GetChunkSize(const void *head)
    {
        return InternalHeap::GetHeapSize(head);
    }

    AuSPtr<Heap> InternalHeap::AllocateDivision(AuUInt32 length, AuUInt32 alignment)
    {
        return AllocateDivisionGlobal(this, length, alignment);
    }

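    // Allocates `length` bytes from `heap` and builds a DeletableHeap over that
    // block. On Init failure the DeletableHeap destructor returns the block to the
    // parent, so no explicit cleanup is needed on that path.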
    AuSPtr<Heap> AllocateDivisionGlobal(Heap *heap, AuUInt32 length, AuUInt32 alignment)
    {
        auto ptr = heap->ZAlloc<void *>(length, alignment);
        if (!ptr)
        {
            return {};
        }

        auto ret = AuMakeShared<DeletableHeap>(heap, ptr);
        if (!ret)
        {
            heap->Free(ptr);
            return {};
        }

        if (!ret->Init(length, ptr))
        {
            return {};
        }

        return ret;
    }

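    // Each successful allocation bumps count_; _Free decrements it. The counter is
    // what lets RequestTermination defer deletion until the last block is returned.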
    void *InternalHeap::_FAlloc(Types::size_t length)
    {
        if (!this->heap_)
        {
            return nullptr;
        }

        auto ret = o1heapAllocate(this->heap_, length);
        if (ret)
        {
            AuAtomicAdd(&this->count_, 1);
        }
        return ret;
    }

    void *InternalHeap::_FAlloc(Types::size_t length, Types::size_t align)
    {
        // o1heap guarantees O1HEAP_ALIGNMENT itself, so that value is permitted.
        SysAssert(align <= O1HEAP_ALIGNMENT, "heap wrapping is unsupported, alignment past the supported 2^x alignment is not possible");
        return this->_FAlloc(length);
    }

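    // Zeroed allocation is implemented as _FAlloc followed by a memset; o1heap has
    // no dedicated zeroing path.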
    void *InternalHeap::_ZAlloc(Types::size_t length)
    {
        if (!this->heap_)
        {
            return nullptr;
        }

        auto ptr = this->_FAlloc(length);
        if (!ptr)
        {
            return nullptr;
        }

        AuMemset(ptr, 0, length);
        return ptr;
    }

    void *InternalHeap::_ZAlloc(Types::size_t length, Types::size_t align)
    {
        SysAssert(align <= O1HEAP_ALIGNMENT, "heap wrapping is unsupported, alignment past the supported 2^x alignment is not possible");
        return _ZAlloc(length);
    }

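    // Reallocation always takes the slow path: allocate a new block, copy
    // min(old, new) bytes, then free the old block. For _ZRealloc the new block is
    // zero-filled first, so any bytes past the old length come back as zero.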
    void *InternalHeap::_ZRealloc(void *buffer, Types::size_t length)
    {
        // Follow realloc convention: a null buffer degrades to a fresh allocation.
        if (!buffer)
        {
            return this->_ZAlloc(length);
        }

        auto prevLength = GetHeapSize(buffer);
        auto alloc = this->_ZAlloc(length);
        if (!alloc)
        {
            return nullptr;
        }

        AuMemcpy(alloc, buffer, AuMin(prevLength, length));
        this->_Free(buffer);
        return alloc;
    }

    void *InternalHeap::_ZRealloc(void *buffer, Types::size_t length, Types::size_t align)
    {
        SysAssert(align <= O1HEAP_ALIGNMENT, "heap wrapping is unsupported, alignment past the supported 2^x alignment is not possible");
        return this->_ZRealloc(buffer, length);
    }

    void *InternalHeap::_FRealloc(void *buffer, Types::size_t length)
    {
        // Follow realloc convention: a null buffer degrades to a fresh allocation.
        if (!buffer)
        {
            return this->_FAlloc(length);
        }

        auto prevLength = GetHeapSize(buffer);
        auto alloc = this->_FAlloc(length);
        if (!alloc)
        {
            return nullptr;
        }

        AuMemcpy(alloc, buffer, AuMin(prevLength, length));
        this->_Free(buffer);
        return alloc;
    }

    void *InternalHeap::_FRealloc(void *buffer, Types::size_t length, Types::size_t align)
    {
        SysAssert(align <= O1HEAP_ALIGNMENT, "heap wrapping is unsupported, alignment past the supported 2^x alignment is not possible");
        return this->_FRealloc(buffer, length);
    }

    void InternalHeap::_Free(void *buffer)
    {
        if (buffer == nullptr)
        {
            return;
        }

        o1heapFree(this->heap_, buffer);
        DecrementUsers();
    }

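    // Deferred-destruction lifecycle: RequestTermination marks the heap as dangling
    // when allocations are still live; DecrementUsers fires on every free, and
    // TryRelease deletes the heap once it is both dangling and empty.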
    void InternalHeap::DecrementUsers()
    {
        if (AuAtomicSub(&this->count_, 1) == 0)
        {
            TryRelease();
        }
    }

    void InternalHeap::TryRelease()
    {
        if (!this->isDangling_)
        {
            return;
        }

        if (count_ == 0)
        {
            delete this;
        }
    }

    void InternalHeap::RequestTermination()
    {
        this->mutex_->Lock();

        if (this->count_)
        {
            SysPushErrorMemory("Heap life was less than its allocations, waiting for final free");
            SysPushErrorMemory("Reporting using mayday!");
            Telemetry::Mayday();

            this->isDangling_ = true;
            this->mutex_->Unlock();
        }
        else
        {
            // Release the lock before tearing down: the destructor resets the
            // mutex, and destroying a mutex while it is held is undefined behavior
            // on most platforms. With count_ at zero, no frees can race us here.
            this->mutex_->Unlock();
            delete this;
        }
    }

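    // Copies o1heap's diagnostics into the base-class stats block.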
    void InternalHeap::UpdateStats()
    {
        auto pDiag = o1heapGetDiagnostics(this->heap_);

        this->stats.uBytesLiveCounter = pDiag.allocated;
        this->stats.uBytesCapacity = pDiag.capacity;
        this->stats.uBytesPeakCounter = pDiag.peak_allocated;
    }

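    // AuSharedFromThis() throws when this heap is not owned by a shared pointer
    // (e.g., it was created via AllocHeapNew); in that case an empty pointer is
    // returned instead of propagating the exception.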
    AuSPtr<Heap> InternalHeap::GetSelfReference()
    {
        try
        {
            return AuSharedFromThis();
        }
        catch (...)
        {
            return {};
        }
    }

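    // Typical usage (a sketch; the sizes and `pBlock` are illustrative only):
    //
    //     auto pHeap = AuMemory::AllocHeapNew(64 * 1024);
    //     if (pHeap)
    //     {
    //         auto pBlock = pHeap->ZAlloc<void *>(128);
    //         pHeap->Free(pBlock);
    //         AuMemory::AllocHeapRelease(pHeap);
    //     }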
    AUKN_SYM Heap *AllocHeapNew(AuUInt size)
    {
        auto heap = _new InternalHeap();
        if (!heap)
        {
            return nullptr;
        }

        if (!heap->Init(size, nullptr))
        {
            delete heap;
            return nullptr;
        }

        return heap;
    }

    AUKN_SYM void AllocHeapRelease(Heap *heap)
    {
        static_cast<InternalHeap *>(heap)->RequestTermination();
    }

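    // Same as AllocHeapNew, but the heap is constructed over caller-owned memory;
    // the region is never freed by the heap (ownsMemory_ stays false).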
    AUKN_SYM Heap *RequestHeapOfRegionNew(void *ptr, AuUInt size)
    {
        auto heap = _new InternalHeap();
        if (!heap)
        {
            return nullptr;
        }

        if (!heap->Init(size, ptr))
        {
            delete heap;
            return nullptr;
        }

        return heap;
    }

    AUKN_SYM void RequestHeapOfRegionRelease(Heap *heap)
    {
        static_cast<InternalHeap *>(heap)->RequestTermination();
    }
}