AuroraRuntime/Source/Memory/Heap.cpp

/***
Copyright (C) 2021 J Reece Wilson (a/k/a "Reece"). All rights reserved.
File: Heap.cpp
Date: 2021-6-12
Author: Reece
***/
#include <Source/RuntimeInternal.hpp>
#include "Memory.hpp"
#include "Heap.hpp"
#include "mimalloc.h"
#include "o1heap.hpp"
#if defined(AURORA_IS_POSIX_DERIVED)
#include <sys/mman.h>
#endif

namespace Aurora::Memory
{
    static AuUInt32 RoundPageUp(AuUInt32 value)
    {
        auto pageMask = HWInfo::GetPageSize() - 1;
        return (value + pageMask) & ~(pageMask);
    }
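
    // Illustration (assuming HWInfo::GetPageSize() reports 4 KiB, i.e. pageMask == 0xFFF):
    //   RoundPageUp(1)    == (1    + 0xFFF) & ~0xFFF == 4096
    //   RoundPageUp(4096) == (4096 + 0xFFF) & ~0xFFF == 4096
    //   RoundPageUp(5000) == (5000 + 0xFFF) & ~0xFFF == 8192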

    static void *HeapLargeAllocate(AuUInt length)
    {
        length = RoundPageUp(length);

#if defined(AURORA_IS_MODERNNT_DERIVED)
        return VirtualAlloc(nullptr, length, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#elif defined(AURORA_IS_POSIX_DERIVED)
        auto ptr = mmap(nullptr, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return ptr == MAP_FAILED ? nullptr : ptr; // mmap signals failure with MAP_FAILED, not nullptr
#else
        // Ideally we would page-align this allocation ourselves.
        // mimalloc has slow paths (and may warn) for overly large pass-through allocations,
        // but 32-byte alignment via its fastest allocation path seems adequate here.
        return AuMemory::FAlloc<void *>(length, 32);
#endif
    }

    static void HeapLargeFree(void *buffer, AuUInt length)
    {
        length = RoundPageUp(length);

#if defined(AURORA_IS_MODERNNT_DERIVED)
        VirtualFree(buffer, 0, MEM_RELEASE);
#elif defined(AURORA_IS_POSIX_DERIVED)
        munmap(buffer, length);
#else
        AuMemory::Free(buffer);
        mi_collect(false);
#endif
    }
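
    // Usage sketch (illustrative only): the two helpers above are intended to be used as a pair,
    // with the same length passed to both so the POSIX path unmaps the full rounded-up region:
    //   auto base = HeapLargeAllocate(size);
    //   if (base) { /* ... use the pages ... */ HeapLargeFree(base, size); }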

    struct InternalHeap : Heap, AuEnableSharedFromThis<InternalHeap>
    {
        virtual AuSPtr<Heap> AllocateDivision(AuUInt32 length, AuUInt32 alignment) override;

        InternalHeap() : base_(nullptr), mutex_(nullptr), heap_(nullptr), count_(0)
        { }

        virtual ~InternalHeap();

        bool ownsMemory_ {};

        bool Init(AuUInt length, void *ptr = nullptr);

        typedef struct FragmentHeader
        {
            void *next;
            void *prev;
            size_t size;
            bool used;
        } FragmentHeader;

        static AuUInt GetHeapSize(const void *ptr)
        {
            return reinterpret_cast<const FragmentHeader *>(ptr)[-1].size;
        }
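
        // This mirrors the fragment header that o1heap keeps immediately before each returned
        // pointer; indexing [-1] from the user pointer assumes the padded header size matches
        // o1heap's alignment. The size field is the fragment size as o1heap tracks it, so treat
        // it as an upper bound on the usable length of the chunk.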

        Types::size_t GetChunkSize(const void *head) override;

        void *_FAlloc(Types::size_t length) override;
        void *_FAlloc(Types::size_t length, Types::size_t align) override;
        void *_ZAlloc(Types::size_t length) override;
        void *_ZAlloc(Types::size_t length, Types::size_t align) override;
        void *_ZRealloc(void *buffer, Types::size_t length) override;
        void *_ZRealloc(void *buffer, Types::size_t length, Types::size_t align) override;
        void *_FRealloc(void *buffer, Types::size_t length) override;
        void *_FRealloc(void *buffer, Types::size_t length, Types::size_t align) override;
        void _Free(void *buffer) override;

        AuSPtr<Heap> GetSelfReference() override;

        void TryRelease();
        void DecrementUsers();
        void RequestTermination();

    private:
        AuThreadPrimitives::MutexUnique_t mutex_;
        void *base_ {};
        O1HeapInstance *heap_ {};
        int count_ {};
        AuUInt length_ {};
        bool isDangling_ {};
    };

    struct DeletableHeap : InternalHeap
    {
        Heap *parent {};
        void *ptr2_ {};

        DeletableHeap(Heap *parent, void *ptr);
        ~DeletableHeap();
    };

    DeletableHeap::DeletableHeap(Heap *parent, void *ptr) : parent(parent), ptr2_(ptr)
    {
    }

    DeletableHeap::~DeletableHeap()
    {
        if (this->ptr2_)
        {
            parent->Free(this->ptr2_);
        }
    }
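
    // A DeletableHeap is an InternalHeap carved out of a region that a parent heap owns;
    // when the DeletableHeap is destroyed, its backing pointer (ptr2_) is returned to that parent.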

    InternalHeap::~InternalHeap()
    {
        SysAssertDbgExp(count_ == 0);

        if (this->base_)
        {
            if (this->heap_)
            {
                o1HeapReleaseCpp(this->heap_); // TODO: replace with a dedicated free function
            }

            if (this->ownsMemory_)
            {
                HeapLargeFree(this->base_, this->length_);
                this->base_ = nullptr;
            }
        }

        this->mutex_.reset();
    }

    bool InternalHeap::Init(AuUInt length, void *ptr)
    {
        SysAssert(!this->base_, "heap already initialized");
        SysAssert(!this->mutex_, "heap already initialized");
        SysAssert(length, "invalid heap allocation");

        this->length_ = length;

        this->mutex_ = AuThreadPrimitives::MutexUnique();
        if (!this->mutex_) return false;

        if (ptr)
        {
            this->base_ = ptr;
            this->ownsMemory_ = false;
        }
        else
        {
            this->base_ = HeapLargeAllocate(length);
            if (!base_) return false;
            this->ownsMemory_ = true;
        }

        this->heap_ = o1heapInit(this->base_, length,
            [this](const O1HeapInstance *const handle) -> void
            {
                SysAssertDbg(this->mutex_, "missing mutex");
                this->mutex_->Lock();
            },
            [this](const O1HeapInstance *const handle) -> void
            {
                SysAssertDbg(this->mutex_, "missing mutex");
                this->mutex_->Unlock();
            }
        );

        // o1heapInit can reject a misaligned or undersized arena; surface that as an init failure
        return this->heap_ != nullptr;
    }
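
    // The two lambdas handed to o1heapInit above appear to serve as the allocator's
    // critical-section enter/leave hooks, serializing every o1heap operation on this
    // instance through mutex_. Roughly (illustrative only):
    //   enter(instance);   // -> this->mutex_->Lock()
    //   ... allocator mutates its fragment lists ...
    //   leave(instance);   // -> this->mutex_->Unlock()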

    Types::size_t InternalHeap::GetChunkSize(const void *head)
    {
        return InternalHeap::GetHeapSize(head);
    }

    AuSPtr<Heap> InternalHeap::AllocateDivision(AuUInt32 length, AuUInt32 alignment)
    {
        return AllocateDivisionGlobal(this, length, alignment);
    }

    AuSPtr<Heap> AllocateDivisionGlobal(Heap *heap, AuUInt32 length, AuUInt32 alignment)
    {
        auto ptr = heap->ZAlloc<void *>(length, alignment);
        if (!ptr)
        {
            return {};
        }

        auto ret = AuMakeShared<DeletableHeap>(heap, ptr);
        if (!ret)
        {
            heap->Free(ptr); // don't leak the sub-region if the control object couldn't be allocated
            return {};
        }

        if (!ret->Init(length, ptr))
        {
            return {};
        }

        return ret;
    }
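
    // Usage sketch (illustrative only; parentHeap is assumed to be some existing Heap *):
    // carve a 1 MiB sub-heap out of it; dropping the shared pointer hands the region back.
    //   AuSPtr<Heap> sub = parentHeap->AllocateDivision(1024 * 1024, 32);
    //   if (sub) { auto ptr = sub->ZAlloc<void *>(128, 8); /* ... */ sub->Free(ptr); }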

    void *InternalHeap::_FAlloc(Types::size_t length)
    {
        if (!this->heap_)
        {
            return nullptr;
        }

        auto ret = o1heapAllocate(this->heap_, length);
        if (ret)
        {
            this->count_++;
        }

        return ret;
    }

    void *InternalHeap::_FAlloc(Types::size_t length, Types::size_t align)
    {
        SysAssert(align < O1HEAP_ALIGNMENT, "heap wrapping is unsupported; alignment beyond the heap's fixed power-of-two alignment is not possible");
        return this->_FAlloc(length);
    }

    void *InternalHeap::_ZAlloc(Types::size_t length)
    {
        if (!this->heap_)
        {
            return nullptr;
        }

        auto ptr = this->_FAlloc(length);
        if (!ptr)
        {
            return nullptr;
        }

        AuMemset(ptr, 0, length);
        return ptr;
    }

    void *InternalHeap::_ZAlloc(Types::size_t length, Types::size_t align)
    {
        SysAssert(align < O1HEAP_ALIGNMENT, "heap wrapping is unsupported; alignment beyond the heap's fixed power-of-two alignment is not possible");
        return _ZAlloc(length);
    }

    void *InternalHeap::_ZRealloc(void *buffer, Types::size_t length)
    {
        if (!buffer)
        {
            return this->_ZAlloc(length);
        }

        auto prevLength = GetHeapSize(buffer);

        auto alloc = this->_ZAlloc(length);
        if (!alloc)
        {
            return nullptr;
        }

        AuMemcpy(alloc, buffer, AuMin(prevLength, length));
        this->_Free(buffer);

        return alloc;
    }

    void *InternalHeap::_ZRealloc(void *buffer, Types::size_t length, Types::size_t align)
    {
        SysAssert(align < O1HEAP_ALIGNMENT, "heap wrapping is unsupported; alignment beyond the heap's fixed power-of-two alignment is not possible");
        return this->_ZRealloc(buffer, length);
    }

    void *InternalHeap::_FRealloc(void *buffer, Types::size_t length)
    {
        if (!buffer)
        {
            return this->_FAlloc(length);
        }

        auto prevLength = GetHeapSize(buffer);

        auto alloc = this->_FAlloc(length);
        if (!alloc)
        {
            return nullptr;
        }

        AuMemcpy(alloc, buffer, AuMin(prevLength, length));
        this->_Free(buffer);

        return alloc;
    }

    void *InternalHeap::_FRealloc(void *buffer, Types::size_t length, Types::size_t align)
    {
        SysAssert(align < O1HEAP_ALIGNMENT, "heap wrapping is unsupported; alignment beyond the heap's fixed power-of-two alignment is not possible");
        return this->_FRealloc(buffer, length);
    }

    void InternalHeap::_Free(void *buffer)
    {
        if (buffer == nullptr)
        {
            return;
        }

        o1heapFree(this->heap_, buffer);
        DecrementUsers();
    }

    void InternalHeap::DecrementUsers()
    {
        if (--this->count_ == 0)
        {
            AU_LOCK_GUARD(this->mutex_);
            TryRelease();
        }
    }

    void InternalHeap::TryRelease()
    {
        if (!this->isDangling_)
        {
            return;
        }

        if (count_ == 0)
        {
            delete this;
        }
    }

    void InternalHeap::RequestTermination()
    {
        AU_LOCK_GUARD(this->mutex_);

        if (count_)
        {
            AuLogWarn("Heap was released while it still had live allocations; deferring destruction until the final free");
            AuLogWarn("Reporting via mayday!");

            Telemetry::Mayday();

            this->isDangling_ = true;
            TryRelease();
        }
        else
        {
            delete this;
        }
    }
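
    // Lifetime note: a heap released via AllocHeapRelease/RequestHeapOfRegionRelease while
    // allocations are still outstanding is marked dangling rather than destroyed; the final
    // _Free() drops count_ to zero and TryRelease() then deletes the heap.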

    AuSPtr<Heap> InternalHeap::GetSelfReference()
    {
        try
        {
            return AuSharedFromThis();
        }
        catch (...)
        {
            return {};
        }
    }

    AUKN_SYM Heap *AllocHeapNew(AuUInt size)
    {
        auto heap = _new InternalHeap();
        if (!heap)
        {
            return nullptr;
        }

        if (!heap->Init(size, nullptr))
        {
            delete heap;
            return nullptr;
        }

        return heap;
    }

    AUKN_SYM void AllocHeapRelease(Heap *heap)
    {
        static_cast<InternalHeap *>(heap)->RequestTermination();
    }

    AUKN_SYM Heap *RequestHeapOfRegionNew(void *ptr, AuUInt size)
    {
        auto heap = _new InternalHeap();
        if (!heap)
        {
            return nullptr;
        }

        if (!heap->Init(size, ptr))
        {
            delete heap;
            return nullptr;
        }

        return heap;
    }

    AUKN_SYM void RequestHeapOfRegionRelease(Heap *heap)
    {
        static_cast<InternalHeap *>(heap)->RequestTermination();
    }
}
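
// Usage sketch (illustrative only, using the symbols exported above): create a private heap,
// allocate from it, and release it; destruction is deferred automatically if anything is live.
//   using namespace Aurora::Memory;
//   Heap *heap = AllocHeapNew(2 * 1024 * 1024);   // 2 MiB arena backed by HeapLargeAllocate
//   if (heap)
//   {
//       auto ptr = heap->ZAlloc<void *>(256, 8);  // zeroed allocation out of the o1heap arena
//       if (ptr) { heap->Free(ptr); }
//       AllocHeapRelease(heap);                   // deletes now, or on the final Free()
//   }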