/***
Copyright (C) 2022 J Reece Wilson (a/k/a "Reece"). All rights reserved.
File: ISocketChannel.hpp
Date: 2022-8-15
Author: Reece
***/
#pragma once
namespace Aurora::IO::Protocol
{
struct IProtocolStack;
}
namespace Aurora::IO::Net
{
struct ISocket;
struct ISocketChannelLimits;
struct ISocketChannelEventListener;
struct ISocketChannel : Protocol::IProtocolBaseReader, Protocol::IProtocolBaseWriter
{
/**
* @brief Returns the parent socket that owns this channel.
* @return the owning ISocket instance
*/
virtual AuSPtr<ISocket> ToParent() = 0;
/**
* @brief If not under the driver's data-available tick,
* schedules the buffered data to be written
*/
virtual void ScheduleOutOfFrameWrite() = 0;
/**
* @brief Returns the raw asynchronous read transaction backing this channel.
* @return
* @warning [not recommended]
* @warning requires: SpecifyManualRead
*/
virtual AuSPtr<IAsyncTransaction> ToReadTransaction() = 0;
/**
* @brief Returns the raw asynchronous write transaction backing this channel.
* @return
* @warning [not recommended]
* @warning requires: SpecifyManualWrite
*/
virtual AuSPtr<IAsyncTransaction> ToWriteTransaction() = 0;
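// Illustrative sketch (not part of this interface): draining the socket by hand
// via the raw read transaction. Assumes pChannel is an AuSPtr<ISocketChannel>
// and that manual read mode was requested during preestablish.
//
//   pChannel->SpecifyManualRead(true); // during preestablish
//   // ...after establish:
//   if (auto pRead = pChannel->ToReadTransaction())
//   {
//       // drive pRead (eg, via its loop source) until end-of-stream
//   }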
//
// Protocol stack APIs
// [used for tls, compression, encryption, and friends]
//
/**
* @brief Creates a new, empty protocol stack for the receive path.
* @return
*/
virtual AuSPtr<Protocol::IProtocolStack> NewProtocolRecvStack() = 0;
/**
* @brief Creates a new, empty protocol stack for the send path.
* @return
*/
virtual AuSPtr<Protocol::IProtocolStack> NewProtocolSendStack() = 0;
/**
* @brief Attaches a protocol stack to the receive path of this channel.
* @param pRecvProtocol
*/
virtual void SpecifyRecvProtocol(const AuSPtr<Protocol::IProtocolStack> &pRecvProtocol) = 0;
/**
* @brief Attaches a protocol stack to the send path of this channel.
* @param pSendProtocol
*/
virtual void SpecifySendProtocol(const AuSPtr<Protocol::IProtocolStack> &pSendProtocol) = 0;
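// Illustrative sketch (not part of this interface): attaching a receive-side
// protocol stack. How pieces (tls, decompression, etc) are composed onto the
// stack is defined by Protocol::IProtocolStack and is elided here.
//
//   auto pRecvStack = pChannel->NewProtocolRecvStack();
//   // ...push tls / decompression pieces onto pRecvStack...
//   pChannel->SpecifyRecvProtocol(pRecvStack);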
/**
* @brief Sets the target amount of bytes to buffer before the next read frame is dispatched.
* @param uNextFrameSize target length in bytes
*/
virtual void SetNextFrameTargetLength(AuUInt uNextFrameSize) = 0;
/**
* @brief Returns the current target frame length as set by SetNextFrameTargetLength.
* @return
*/
virtual AuUInt GetNextFrameTargetLength() = 0;
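// Illustrative sketch (not part of this interface): length-prefixed framing.
// kHeaderLength and uBodyLength are hypothetical protocol values.
//
//   pChannel->SetNextFrameTargetLength(kHeaderLength); // wait for a full header
//   // ...once the header is parsed and the body length is known:
//   pChannel->SetNextFrameTargetLength(uBodyLength);   // wait for the full body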
//
// The following specify functions are to be used before the socket has established / during preestablish.
// Exceptions will be noted.
//
/**
* @brief Toggles manual write mode, bypassing the protocol write path
* in favour of the ToWriteTransaction api.
* @param bEnableDirectAIOWrite when true, disables protocol handler behaviour
* and enables the ::ToWriteTransaction() routine
* @return
*/
virtual bool SpecifyManualWrite(bool bEnableDirectAIOWrite) = 0;
/**
* @brief Disables automatic pipe processing of the socket. Instead,
* you must use ToReadTransaction to read until end-of-stream.
* @param bEnableDirectAIORead when true, disables the pipe processor
* @return
*/
virtual bool SpecifyManualRead(bool bEnableDirectAIORead) = 0;
/**
* @brief Controls the TCP_NODELAY flag of the underlying socket.
* @param bFuckNagle when true, disables Nagle's algorithm
* @return
* @warning May only succeed after preestablish if SpecifyManualWrite was set true.
*
* Otherwise, Nagle is disabled because it is a beyond-dumb optimization from the TTY era.
* It is not unreasonable for a client and server to use a similar tick rate, with
* identical io-constraints, doing things at a similar rate, to process packets given
* shared bandwidth constraints before DoS-dropping.
* (just configure your buffer sizes correctly - perhaps in real time)
*
* Point is, any reasonably buffered protocol *wants* a flush after a frame or more has
* been written in an IO/application tick.
* If you don't flush, you end up waiting on the other side to respond to a request
* you never actually sent.
*
* As a frame of reference, NGINX, curl, and Chromium all either use nodelay by default
* or the community recommends its usage as a default.
* https://bugzilla.mozilla.org/show_bug.cgi?id=542401
* https://curl.se/libcurl/c/CURLOPT_TCP_NODELAY.html
* https://github.com/websocket-client/websocket-client/issues/41
*/
virtual bool SpecifyTCPNoDelay(bool bFuckNagle) = 0;
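// Illustrative sketch (not part of this interface): disabling Nagle during
// preestablish so that each written frame is flushed promptly.
//
//   if (!pChannel->SpecifyTCPNoDelay(true))
//   {
//       // too late: the socket established without manual write mode
//   }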
/**
* @brief Returns the current TCP_NODELAY state of the underlying socket.
* @return
*/
virtual bool GetCurrentTCPNoDelay() = 0;
/**
* @brief In manual mode, you may wish to use the loop source
* interface of the IAsyncTransaction. By default, network
* IO is optimized in such a way that IO fences aren't
* required for use.
* default: false
* @param bAllocateFence
* @return
*/
virtual bool SpecifyTransactionsHaveIOFence(bool bAllocateFence) = 0;
/**
* @brief Specifies the read-bound rate limit, in bytes, per IO tick
* @param uBytes
* @return
*/
virtual bool SpecifyPerTickAsyncReadLimit(AuUInt uBytes) = 0;
/**
* @brief Reallocates both the input and output bytebuffers
* @param uBytes new buffer length in bytes
* @param pCallbackOptional optional if on thread
* @return
* [can work after establish under the right conditions]
*/
virtual bool SpecifyBufferSize(AuUInt uBytes,
const AuSPtr<AuAsync::PromiseCallback<AuNullS, AuNullS>> &pCallbackOptional) = 0;
/**
* @brief Reallocates the input bytebuffer
* @param uBytes new buffer length in bytes
* @param pCallbackOptional optional if on thread
* @return
* @warning The size specified must not interfere with the current
* operation or state of the underlying stream.
* You cannot resize a buffer, mid-pipe, if it means dropping buffered data
* [can work after establish under the right conditions]
*/
virtual bool SpecifyInputBufferSize(AuUInt uBytes,
const AuSPtr<AuAsync::PromiseCallback<AuNullS, AuNullS>> &pCallbackOptional) = 0;
/**
* @brief Returns the current input buffer size in bytes.
* @return
*/
virtual AuUInt GetInputBufferSize() = 0;
/**
* @brief Reallocates the output bytebuffer
* @param uBytes new buffer length in bytes
* @param pCallbackOptional optional if on thread
* @return
* @warning The size specified must not interfere with the current
* operation or state of the underlying stream.
* You cannot resize a buffer, mid-pipe, if it means dropping buffered data
* [can work after establish under the right conditions]
*/
*/
virtual bool SpecifyOutputBufferSize(AuUInt uBytes,
const AuSPtr<AuAsync::PromiseCallback<AuNullS, AuNullS>> &pCallbackOptional) = 0;
/**
* @brief Returns the current output buffer size in bytes.
* @return
*/
virtual AuUInt GetOutputBufferSize() = 0;
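// Illustrative sketch (not part of this interface): growing the input buffer from
// the channel's own thread. Per the notes above, the completion callback may be
// omitted when on thread; 64 KiB is an arbitrary example size.
//
//   if (!pChannel->SpecifyInputBufferSize(64 * 1024, {}))
//   {
//       // rejected: the resize would have dropped buffered mid-pipe data
//   }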
/**
* Recommended: 0, consume everything within the constraints of GetInputBufferSize/SpecifyInputBufferSize.
* Use SingleFrame stack pieces to recursively tick a stack of protocol handlers.
* Use DynamicBuffer protocol stack pieces to dynamically scale the output size.
*
* When not zero, this is the packet length per io processor frame.
* The io read is capped to a segment of this size (uPageSize out of GetInputBufferSize/SpecifyInputBufferSize).
* In theory, it is possible to boost maximum connection throughput (fairer bandwidth across a higher socket count) at the cost of lower per-socket bandwidth, an increased tick rate, and higher cpu usage.
*
* Say you can afford to buffer 10 frames.
* Should an aggressively written application dequeue all of that at once, you would need to:
* > allocate 10x the amount of memory in a single io tick,
* > worry about how much cpu work time a single request or batch of requests takes,
* > budget 10x the peak memory usage for decompression and encryption handlers.
* uPageSize can hold connections back, keeping their pending to-read memory elsewhere in the network stack.
*
* If you know the MTU is 32k, SpecifyPageLength(32k) for 1:1 op/tick.
* If you know the target is one tcp frame per tick, SpecifyPageLength(64k) for 1:1 op/tick.
* UDP may need splitting up across 576-byte frames.
* You may want to bulk read multiple frames out of kernel-allocated network packets instead of hammering the tick rate.
* On the other hand, you may want to limit how many segments you dequeue in a single io processor tick and ::read/ReadFromEx/ReadFileEx/io_submit([pread])(uNBytes) to a multiplier of your max expected packet size.
*/
virtual void SpecifyPageLength(AuUInt uPageSize) = 0;
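// Illustrative sketch (not part of this interface): pinning the page length to a
// known MTU for 1:1 op/tick behaviour, per the notes above. uKnownMTU is a
// hypothetical, externally determined value.
//
//   pChannel->SpecifyPageLength(uKnownMTU); // eg, 32 * 1024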
/**
* @brief Returns the current page length as set by SpecifyPageLength.
* @return
*/
virtual AuUInt GetPageLength() = 0;
//
// BREAK! Ending preestablishment bias
// BREAK! Ending preestablishment bias
//
/**
* @brief Returns statistics for the receive path of this channel.
* @return
*/
virtual AuSPtr<ISocketStats> GetRecvStats() = 0;
/**
* @brief Returns statistics for the send path of this channel.
* @return
*/
virtual AuSPtr<ISocketStats> GetSendStats() = 0;
/**
* @brief Registers an event listener on this channel.
* @param pListener
*/
virtual void AddEventListener(const AuSPtr<ISocketChannelEventListener> &pListener) = 0;
/**
* @brief Unregisters a previously added event listener.
* @param pListener
*/
virtual void RemoveEventListener(const AuSPtr<ISocketChannelEventListener> &pListener) = 0;
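// Illustrative sketch (not part of this interface): listener lifecycle. MyListener
// is a hypothetical ISocketChannelEventListener implementation; AuMakeShared is
// assumed per Aurora's shared-pointer conventions.
//
//   auto pListener = AuMakeShared<MyListener>();
//   pChannel->AddEventListener(pListener);
//   // ...
//   pChannel->RemoveEventListener(pListener);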
/**
* @brief Returns the limits of this channel.
* @return
*/
virtual AuSPtr<ISocketChannelLimits> GetChannelLimits() = 0;
AURT_ADD_USR_DATA;
};
}