[*] Updated memory comments and keep alignment

Reece Wilson 2024-01-15 12:04:40 +00:00
parent 895ef05887
commit 997d4a2eca
4 changed files with 93 additions and 55 deletions

View File

@@ -20,29 +20,23 @@ namespace Aurora::Memory
eStringQword
};
// TODO: rework scaling
static const auto kBufferPageSize = 512;
//static const auto kBufferBasePower = 8;
static const auto kBufferInitialPower = 9;// -kBufferBasePower; // 4-bit integer
static const auto kBufferInitialPower = 9;
/***
* A bytebuffer object represents a "page" aligned (optionally) resizable buffer **or** a ring buffer.
* Trivial linear serialization use cases will follow the linear fast paths, not those of a ring buffer.
*
* Ring buffers are used to wrap streams when the consumer may expect arbitrary stream seeks of an otherwise
* limited consume-once stream
*
* EG (old):
* -> Peeking a header in a datagram, or tcp stream; where instead of freeing the datagram or double
* buffering the network stack when required, a ring buffer is used to prevent reallocation on each frame
* -> Peeking, or seeking back after, compression read. A compression api could be fed on-known-input-available
* or on-demand, writing to its write head pointer, while never running out of space so long as the
* decompressed ring read head continues moving
* A bytebuffer object represents a:
* * optionally resizable buffer;
* an infinite read/writer stream pair, in ring buffer mode;
* * synonym for fulfill-once AuList<AuUInt8> & output parameters;
* * stream of arbitrary serialization/deserialization, seekable read/writes;
* * ...tuple of <buffer base, read head, write head> and some flags;
* * ...method by which one can serialize data in-place via SharedByteBuffer
*
* EG (2022):
* -> AuProtocol ProtocolPiece buffers
* -> AuCompression internal buffer
* -> A socket's output stream / space for user-write and overlapped submission
-> A socket's output stream / buffer for user-read/write and overlapped submission
* -> ...
*
* Deprecates INetworkStream, fixes allocation issues around compression backends
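For orientation, a minimal linear-serialization sketch using only members that appear elsewhere in this diff (the size constructor, WriteTagged, the bool conversion, RemainingBytes); the exact meaning of RemainingBytes is assumed to be the byte count between the read and write heads:

    AuByteBuffer stream(512);                    // preallocated, optionally resizable linear buffer
    stream.WriteTagged<AuUInt32>(0xCAFEBABE);    // trivial linear serialization fast path
    if (stream)                                  // buffer is valid and holds data
    {
        auto uPending = stream.RemainingBytes(); // assumed: bytes between the read and write heads
    }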
@@ -80,13 +74,11 @@ namespace Aurora::Memory
AuUInt8 flagWriteError : 1 {};
AuUInt8 flagNoFree : 1 {};
AuUInt8 flagNoRealloc: 1 {};
AuUInt8 flagAlwaysExpandable : 1 {}; // it's a long story from how we got from string views to std::vector<std::uint8_t>s to current day AuByteBuffer.
// anyway long story short, if you want a buffered api to write into us linearly and grow, enable me.
// if you just want ::Write and similar functions to work keem me false and enable flagExpandable.
// flagExpandable is for when the default constructor is called. i'm for apis that use us as an interface to grow.
AuUInt8 flagAlwaysExpandable : 1 {};
// TODO: flag: allow circular overrun to allow for 100% access of the buffer from either read head
// - implicit padding
AuUInt8 scaleSize {};
AuUInt8 alignment {};
///////////////////////////////////////////////////////////////////////
/**
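A hedged illustration of how a caller might toggle these bit-flags, assuming they are publicly accessible members; the semantics are paraphrased from the flag names and the old comment above (flagExpandable is referenced there but not shown in this hunk):

    AuByteBuffer growable(512);
    growable.flagAlwaysExpandable = true;   // let buffered APIs write into us linearly and keep growing

    AuByteBuffer pinned(512);
    pinned.flagNoRealloc = true;            // never reallocate; writes past the end fail instead of moving the base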
@@ -347,7 +339,8 @@ namespace Aurora::Memory
// utils: &byteArray[n]
/**
* @brief read u8 relative to the read head
* @brief read u8 reference relative to the base
* @warning not const safe
* @param idx
* @return
*/
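Judging by the "&byteArray[n]" note, this backs an index-style accessor; a one-line illustration, with the operator[] spelling treated as an assumption:

    buffer[0] = 0xFF;   // yields a mutable AuUInt8 reference relative to the buffer base, hence the const-safety warning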
@@ -421,12 +414,34 @@ namespace Aurora::Memory
inline auline MemoryViewWrite GetLinearWriteable(AuUInt length);
/**
* @brief Long story short, AuList<AuUInt8> ~= std::vector<AuUInt8>:
* AuByteBuffer should be (was) a drag and drop replacement for trash code that uses lists as bytebuffers. This is opposed to ye old std::string contains anything buffers.
* Either way, to the point, AuByteBuffer's default constructor is to allow for expandability on Write. GetOrAllocateLinearWriteable is used by certain parse APIs to
* expand ONCE into a buffer that is uninitialized. Should the caller setup the AuByteBuffer, preallocate a runway, etc, etc, GetOrAllocateLinearWriteable will run on
* the memory - the read/write heads of the already setup buffer.
* Otherwise, the default constructor shall allow this to ALLOCATE ONCE given a buffer in an uninitialized/!IsValid()/!refByteBuffer state.
* @brief
* Expected use-case:
* Consider the prototype: void MyFunction(AuByteBuffer &output);
*
* Where the valid expected uses fall in line with the following truth table:
*
* * PASS
* AuByteBuffer defaultBuffer;
* MyFunction(defaultBuffer);
* if (defaultBuffer) {}
*
* * FAIL
* AuByteBuffer defaultBuffer;
defaultBuffer.WriteTagged<AuUInt32>(32);
* MyFunction(defaultBuffer);
* if (defaultBuffer) {}
*
* * FAIL
* AuByteBuffer defaultBuffer;
* MyFunction(defaultBuffer);
* MyFunction(defaultBuffer); // !!!
*
* * PASS
* AuByteBuffer writeStream(512);
writeStream.WriteTagged<AuUInt32>(32);
MyFunction(writeStream);
if (writeStream) {}
*
* @param length
* @return
*/
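Restating the two PASS rows above as caller-side code (MyFunction is the hypothetical callee from the comment; no new API is introduced):

    // Case 1: default-constructed buffer, the callee allocates exactly once.
    AuByteBuffer defaultBuffer;
    MyFunction(defaultBuffer);
    if (defaultBuffer)
    {
        // consume the fulfilled buffer
    }

    // Case 2: caller-provided runway, the callee writes at the existing write head.
    AuByteBuffer writeStream(512);
    writeStream.WriteTagged<AuUInt32>(32);
    MyFunction(writeStream);
    if (writeStream)
    {
        // consume
    }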
@@ -439,7 +454,7 @@ namespace Aurora::Memory
inline auline bool SetBuffer(MemoryViewRead readView, bool bMoveWriteHeadForReaders = true);
/**
* @brief Releases excess memory (like, shrink to fit in c++)
* @brief Releases excess memory (comparable to shrink_to_fit in the C++ STL)
* @return
*/
inline auline void GC();
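A short sketch of when GC() is worth calling, assuming it trims the allocation down toward the bytes actually in use:

    AuByteBuffer scratch(4096);         // generous transient runway
    scratch.WriteTagged<AuUInt32>(32);  // only a handful of bytes end up used
    scratch.GC();                       // release the excess capacity (shrink to fit)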

View File

@@ -32,7 +32,15 @@ namespace Aurora::Memory
return true;
}
this->base = ZAlloc<AuUInt8 *>(length);
if (this->alignment)
{
this->base = ZAlloc<AuUInt8 *>(length, this->alignment);
}
else
{
this->base = ZAlloc<AuUInt8 *>(length);
}
if (!this->base)
{
return false;
@@ -73,6 +81,7 @@ namespace Aurora::Memory
return false;
}
this->alignment = alignment;
this->length = length;
this->allocSize = length;
this->readPtr = this->base;
@@ -122,6 +131,7 @@ namespace Aurora::Memory
this->allocSize = {};
this->readPtr = {};
this->writePtr = {};
this->alignment = {};
this->scaleSize = kBufferInitialPower;
}
@@ -172,16 +182,25 @@ namespace Aurora::Memory
auto uReadOffset = this->readPtr - this->base;
auto uWriteOffset = this->writePtr - this->base;
auto temp = Memory::ZRealloc(this->base, this->length);
if (!temp)
AuUInt8 *pNext {};
if (this->alignment)
{
pNext = Memory::ZRealloc(this->base, this->length, this->alignment);
}
else
{
pNext = Memory::ZRealloc(this->base, this->length);
}
if (!pNext)
{
return;
}
this->base = temp;
this->base = pNext;
this->allocSize = this->length;
this->readPtr = temp + uReadOffset;
this->writePtr = temp + uWriteOffset;
this->readPtr = pNext + uReadOffset;
this->writePtr = pNext + uWriteOffset;
}
bool ByteBuffer::Resize(AuUInt length)
@@ -218,7 +237,17 @@ namespace Aurora::Memory
auto newLength = AuMax(length, AuPageRoundUp(this->allocSize + (this->allocSize / 3), AuUInt(64)));
if (auto pNext = ZRealloc(this->base, newLength))
AuUInt8 *pNext {};
if (this->alignment)
{
pNext = ZRealloc(this->base, newLength, this->alignment);
}
else
{
pNext = ZRealloc(this->base, newLength);
}
if (pNext)
{
this->base = pNext;
this->length = length;
@@ -235,9 +264,9 @@ namespace Aurora::Memory
return false;
}
replacement.flagAlwaysExpandable = this->flagAlwaysExpandable;
replacement.alignment = this->alignment;
AuUInt uBytesRem {};
if (!this->flagCircular && length < (uBytesRem = this->RemainingBytes()))
{
this->writePtr = (uOldHead + uBytesRem) + this->base;

View File

@@ -161,6 +161,7 @@ namespace Aurora::Memory
bool ByteBuffer::ReadString(AuString &string, EStringType type, Locale::ECodePage codepage)
{
ByteBufferPushReadState a(*this);
AuUInt64 uLength {};
AuUInt uZeroByteLength { 0 };

View File

@@ -114,23 +114,11 @@ namespace Aurora::Memory
return ToSmartPointer((T *)pPtr, true);
}
/**
* @brief
* @param in
* A pointer owned by the heap
* @param pinThis
* If you attempt to destroy a heap, or allow a shared ptr to free the heap w/o freeing all allocations, the heap will dangle and a telemetry warning will be sent.
* pinThis isn't strictly needed unless you wish to slience heap freed before allocation warnings.
* A memory management model whereby you dont pin the parent heap will prevent circular references from taking over; in contrast, the alternative will need well defined object disposal
* One could keep heap references weak outside of your heap manager and always pin this - or you could pin nothing and be careful with object disposal order
* @return
*/
template<typename T>
AuSPtr<T> ToSmartPointer(T *in, bool pinThis)
AuSPtr<T> ToSmartPointer(T *in, bool bPinThis)
{
if (in == nullptr) return {};
auto heapHandle = pinThis ? GetSelfReference() : AuSPtr<Heap> {};
auto heapHandle = bPinThis ? GetSelfReference() : AuSPtr<Heap> {};
return AuSPtr<T>(in,
[heapHandle, in, this](T *delt)
{
@ -146,7 +134,7 @@ namespace Aurora::Memory
static AuSPtr<T> ToSmartPointer(AuSPtr<Heap> heap, T *in, bool pinHeap = true)
{
auto handle = pinHeap ? heap : AuSPtr<Heap> {};
auto ptr = heap.get(); // so long as in is a valid pointer within the heap, this is fine
auto ptr = heap.get();
return AuSPtr<T>(in,
[handle, ptr](T *delt)
{
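A sketch of the pinning behavior that the removed comment above describes; pHeap->ZAlloc and the MyPod type are illustrative assumptions, while ToSmartPointer(ptr, bPinThis) is the member shown in this hunk:

    // pHeap: a live Aurora::Memory::Heap obtained elsewhere
    auto pObject  = pHeap->ZAlloc<MyPod *>(sizeof(MyPod));  // assumed raw allocation helper on the heap
    auto spObject = pHeap->ToSmartPointer(pObject, true);   // bPinThis = true

    // With bPinThis = true, the deleter holds a reference to the heap, so the heap
    // cannot be torn down while spObject is alive. With bPinThis = false, the caller
    // must guarantee the heap outlives spObject (and accept circular-reference trade-offs).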
@@ -180,16 +168,21 @@ namespace Aurora::Memory
AUKN_SHARED_API(GetDefaultDiscontiguousHeap, Heap);
/**
Allocates Fize amount of memory
Allocates uLength bytes of contiguous virtual memory
NOTE -> Heaps are guaranteed to outlive its' allocations; heap are the one object that own themselves
Destructions are mere suggestions, however, requesting termination before a heap has released all of its memory will result in a telemetry mayday
@warning Heaps are guaranteed to outlive their allocations; a heap is the one object that effectively owns a reference count on itself.
Requesting termination before all of a heap's memory has been freed will, at worst, result in a warning.
Expect to leak unless every alloc has been paired with a free.
@return a heap backed by allocated memory
I do not expect to implement forced frees, simply because all of our primary use cases keep track of dtors to forcefully release leaked objects.
Use RequestHeapOfRegion to be backed by caller owned memory.
@return a heap backed by uLength bytes of virtual memory
*/
AUKN_SHARED_API(AllocHeap, Heap, AuUInt uLength);
AUKN_SHARED_API(RequestHeapOfRegion, Heap, void *pPtr, AuUInt uLength);
// Behaves like AllocHeap, but uses mimalloc (or the default allocator) instead
AUKN_SHARED_API(AllocHeapMimalloc, Heap, AuUInt uLength);
}
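An illustrative sketch of the factories above; AllocHeapShared and RequestHeapOfRegionShared are assumed spellings of whatever the AUKN_SHARED_API macro expands to (the expansion is not shown in this diff):

    // Heap backed by 64 KiB of freshly allocated virtual memory.
    auto spHeap = Aurora::Memory::AllocHeapShared(64 * 1024);

    // Heap backed by caller-owned memory instead.
    static AuUInt8 gArena[16 * 1024];
    auto spArenaHeap = Aurora::Memory::RequestHeapOfRegionShared(gArena, sizeof(gArena));

    // Per the warning above, free every allocation made from these heaps before
    // dropping the last reference, or expect the backing memory to leak.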