[ext-code-space] Fix CodeRange allocation logic

1) When generating short builtin calls/jumps, assemblers should use the
   offset from the CodeRange base rather than from the start of the code
   range reservation, because otherwise the PC-relative offset is not
   guaranteed to fit into the architecture's immediate constraints.
   The code range reservation start can differ from the code range base
   in the following cases:
     * when the "base bias size" is non-zero (on Windows 64), or
     * when we end up over-reserving the address space for the code
       range, which happens as a last resort to fulfil the CodeRange
       alignment requirements.
   See the VirtualMemoryCage description for details.
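
   To illustrate 1), a minimal standalone sketch (hypothetical addresses
   and limits, not actual V8 code): when the reservation is larger than
   the cage, an offset measured from the reservation start can exceed the
   instruction's immediate range even though the same target is reachable
   from the CodeRange base.

     #include <cassert>
     #include <cstdint>

     using Address = uint64_t;

     int main() {
       // Hypothetical layout: a 128 MB code range whose reservation is
       // over-reserved, so the cage base sits 64 MB past the reservation
       // start (e.g. due to base bias or alignment trimming).
       const uint64_t kMaxPcRelative = uint64_t{128} << 20;  // e.g. arm64 B/BL
       const Address reservation_start = 0x200000000000;
       const Address code_range_base = reservation_start + (uint64_t{64} << 20);

       // A builtin near the end of the code range.
       const Address target = code_range_base + (uint64_t{127} << 20);

       // Measured from the base, the offset is encodable...
       assert(target - code_range_base < kMaxPcRelative);
       // ...measured from the reservation start, it is not.
       assert(target - reservation_start >= kMaxPcRelative);
       return 0;
     }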

Drive-by fixes:
2) In case of over-reserving address space for the external code range,
   the pre-calculated hint for where the remapped embedded builtins
   should be copied to was outside of the allocatable CodeRange region
   and thus useless. The fix is to use the allocatable region instead
   of the reservation region when calculating the hint (see the sketch
   after this list).
3) When allocating a CodeRange with a zero base bias size, we can create
   the VirtualMemory reservation on the first attempt simply by passing
   the required base alignment to the VirtualMemory constructor.
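
   A rough sketch of fix 2 with simplified stand-in types (the real logic
   lives in CodeRange::RemapEmbeddedBuiltins; rounding and exact
   signatures differ):

     #include <cstdint>

     using Address = uint64_t;

     // Stand-in for base::BoundedPageAllocator: it manages exactly the
     // allocatable region, which can be smaller than the reservation.
     struct BoundedPageAllocator {
       Address begin_;
       uint64_t size_;
       Address begin() const { return begin_; }
       uint64_t size() const { return size_; }
     };

     // Derive the remap hint from the allocatable region managed by the
     // page allocator rather than from the (possibly over-reserved)
     // reservation, so the hint always points inside the cage.
     Address RemapHint(const BoundedPageAllocator& pa, uint64_t blob_code_size) {
       // Remap embedded builtins into the end of the allocatable range.
       return pa.begin() + pa.size() - blob_code_size;
     }

     int main() {
       BoundedPageAllocator pa{0x200000000000, uint64_t{128} << 20};
       return RemapHint(pa, uint64_t{1} << 20) < pa.begin() + pa.size() ? 0 : 1;
     }

   Fix 3 needs no new logic on the caller side: with a zero base bias the
   cage base coincides with the reservation start, so forwarding
   params.base_alignment to the VirtualMemory constructor yields a
   suitably aligned reservation on the first attempt instead of the
   over-reserve-and-trim fallback.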

Bug: v8:11880, chromium:1290591
Change-Id: If341418947e2170d967e22b38bcc371594939c1c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3412089
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78772}
Igor Sheludko, 2022-01-26 12:40:53 +01:00, committed by V8 LUCI CQ
parent 708cd79f98
commit 695afbff17
11 changed files with 50 additions and 26 deletions

View File

@@ -536,7 +536,7 @@ Address Assembler::runtime_entry_at(Address pc) {
     return Assembler::target_address_at(pc, 0 /* unused */);
   } else {
     DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
-    return instr->ImmPCOffset() + options().code_range_start;
+    return instr->ImmPCOffset() + options().code_range_base;
   }
 }

View File

@@ -272,8 +272,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   // Returns the target address for a runtime function for the call encoded
   // at 'pc'.
   // Runtime entries can be temporarily encoded as the offset between the
-  // runtime function entrypoint and the code range start (stored in the
-  // code_range_start field), in order to be encodable as we generate the code,
+  // runtime function entrypoint and the code range base (stored in the
+  // code_range_base field), in order to be encodable as we generate the code,
   // before it is moved into the code space.
   inline Address runtime_entry_at(Address pc);

View File

@@ -1844,8 +1844,8 @@ int64_t TurboAssembler::CalculateTargetOffset(Address target,
     // Assembler::runtime_entry_at()).
     // Note that builtin-to-builtin calls use a different OFF_HEAP_TARGET mode
     // and therefore are encoded differently.
-    DCHECK_NE(options().code_range_start, 0);
-    offset -= static_cast<int64_t>(options().code_range_start);
+    DCHECK_NE(options().code_range_base, 0);
+    offset -= static_cast<int64_t>(options().code_range_base);
   } else {
     offset -= reinterpret_cast<int64_t>(pc);
   }

View File

@@ -72,14 +72,12 @@ AssemblerOptions AssemblerOptions::Default(Isolate* isolate) {
 #endif
   options.inline_offheap_trampolines &= !generating_embedded_builtin;
 #if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
-  const base::AddressRegion& code_range = isolate->heap()->code_region();
-  DCHECK_IMPLIES(code_range.begin() != kNullAddress, !code_range.is_empty());
-  options.code_range_start = code_range.begin();
+  options.code_range_base = isolate->heap()->code_range_base();
 #endif
   options.short_builtin_calls =
       isolate->is_short_builtin_calls_enabled() &&
       !generating_embedded_builtin &&
-      (options.code_range_start != kNullAddress) &&
+      (options.code_range_base != kNullAddress) &&
       // Serialization of RUNTIME_ENTRY reloc infos is not supported yet.
       !serializer;
   return options;

View File

@@ -175,9 +175,9 @@ struct V8_EXPORT_PRIVATE AssemblerOptions {
   // instructions. For example, when the builtins code is re-embedded into the
   // code range.
   bool short_builtin_calls = false;
-  // On some platforms, all code is within a given range in the process,
-  // and the start of this range is configured here.
-  Address code_range_start = 0;
+  // On some platforms, all code is created within a certain address range in
+  // the process, and the base of this code range is configured here.
+  Address code_range_base = 0;
   // Enable pc-relative calls/jumps on platforms that support it. When setting
   // this flag, the code range must be small enough to fit all offsets into
   // the instruction immediates.

View File

@@ -38,9 +38,9 @@ void Assembler::emitw(uint16_t x) {
 // TODO(ishell): Rename accordingly once RUNTIME_ENTRY is renamed.
 void Assembler::emit_runtime_entry(Address entry, RelocInfo::Mode rmode) {
   DCHECK(RelocInfo::IsRuntimeEntry(rmode));
-  DCHECK_NE(options().code_range_start, 0);
+  DCHECK_NE(options().code_range_base, 0);
   RecordRelocInfo(rmode);
-  uint32_t offset = static_cast<uint32_t>(entry - options().code_range_start);
+  uint32_t offset = static_cast<uint32_t>(entry - options().code_range_base);
   emitl(offset);
 }
@@ -273,7 +273,7 @@ Handle<HeapObject> Assembler::compressed_embedded_object_handle_at(Address pc) {
 }
 
 Address Assembler::runtime_entry_at(Address pc) {
-  return ReadUnalignedValue<int32_t>(pc) + options().code_range_start;
+  return ReadUnalignedValue<int32_t>(pc) + options().code_range_base;
 }
 
 // -----------------------------------------------------------------------------
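
A round-trip sketch of the RUNTIME_ENTRY encoding shown above (simplified
and with hypothetical addresses; the real code goes through the assembler
buffer and relocation info):

  #include <cassert>
  #include <cstdint>

  using Address = uint64_t;

  int main() {
    const Address code_range_base = 0x200000000000;  // hypothetical
    const Address entry = code_range_base + 0x1234;  // runtime entrypoint

    // emit_runtime_entry(): emit the 32-bit offset from the code range base.
    const uint32_t encoded = static_cast<uint32_t>(entry - code_range_base);

    // runtime_entry_at(): add the base back to recover the absolute address.
    const Address decoded = encoded + code_range_base;
    assert(decoded == entry);
    return 0;
  }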

View File

@@ -172,7 +172,10 @@ uint8_t* CodeRange::RemapEmbeddedBuiltins(Isolate* isolate,
                                           size_t embedded_blob_code_size) {
   base::MutexGuard guard(&remap_embedded_builtins_mutex_);
 
-  const base::AddressRegion& code_region = reservation()->region();
+  // Remap embedded builtins into the end of the address range controlled by
+  // the BoundedPageAllocator.
+  const base::AddressRegion code_region(page_allocator()->begin(),
+                                        page_allocator()->size());
   CHECK_NE(code_region.begin(), kNullAddress);
   CHECK(!code_region.is_empty());

View File

@@ -191,6 +191,10 @@ inline const base::AddressRegion& Heap::code_region() {
 #endif
 }
 
+Address Heap::code_range_base() {
+  return code_range_ ? code_range_->base() : kNullAddress;
+}
+
 int Heap::MaxRegularHeapObjectSize(AllocationType allocation) {
   if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
       (allocation == AllocationType::kCode)) {

View File

@@ -931,10 +931,15 @@ class Heap {
     return array_buffer_sweeper_.get();
   }
 
+  // The potentially overreserved address space region reserved by the code
+  // range if it exists, or an empty region otherwise.
   const base::AddressRegion& code_region();
 
   CodeRange* code_range() { return code_range_.get(); }
 
+  // The base of the code range if it exists, or the null address otherwise.
+  inline Address code_range_base();
+
   LocalHeap* main_thread_local_heap() { return main_thread_local_heap_; }
 
   Heap* AsHeap() { return this; }

View File

@@ -368,14 +368,17 @@ bool VirtualMemoryCage::InitReservation(
         VirtualMemory(params.page_allocator, existing_reservation.begin(),
                       existing_reservation.size());
     base_ = reservation_.address() + params.base_bias_size;
-  } else if (params.base_alignment == ReservationParams::kAnyBaseAlignment) {
-    // When the base doesn't need to be aligned, the virtual memory reservation
-    // fails only due to OOM.
+  } else if (params.base_alignment == ReservationParams::kAnyBaseAlignment ||
+             params.base_bias_size == 0) {
+    // When the base doesn't need to be aligned or when the requested
+    // base_bias_size is zero, the virtual memory reservation fails only
+    // due to OOM.
     Address hint =
         RoundDown(params.requested_start_hint,
                   RoundUp(params.base_alignment, allocate_page_size));
     VirtualMemory reservation(params.page_allocator, params.reservation_size,
-                              reinterpret_cast<void*>(hint));
+                              reinterpret_cast<void*>(hint),
+                              params.base_alignment);
     if (!reservation.IsReserved()) return false;
 
     reservation_ = std::move(reservation);
@@ -455,6 +458,7 @@
       RoundDown(params.reservation_size - (allocatable_base - base_) -
                     params.base_bias_size,
                 params.page_size);
+  size_ = allocatable_base + allocatable_size - base_;
   page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
       params.page_allocator, allocatable_base, allocatable_size,
       params.page_size,
@@ -465,6 +469,7 @@
 void VirtualMemoryCage::Free() {
   if (IsReserved()) {
     base_ = kNullAddress;
+    size_ = 0;
     page_allocator_.reset();
     reservation_.Free();
   }
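
The new size_ bookkeeping follows the cage layout arithmetic above; a
compilable sketch with hypothetical parameters (simplified RoundUp and
RoundDown, power-of-two alignments assumed):

  #include <cstdint>

  using Address = uint64_t;

  constexpr uint64_t RoundUp(uint64_t x, uint64_t align) {
    return (x + align - 1) & ~(align - 1);
  }
  constexpr uint64_t RoundDown(uint64_t x, uint64_t align) {
    return x & ~(align - 1);
  }

  int main() {
    const Address start = 0x200000000000;         // reservation start
    const uint64_t reservation_size = uint64_t{256} << 20;
    const uint64_t base_bias_size = 0;            // zero on most platforms
    const uint64_t page_size = uint64_t{64} << 10;

    const Address base = start + base_bias_size;  // cage base
    const Address allocatable_base = RoundUp(base, page_size);
    const uint64_t allocatable_size = RoundDown(
        reservation_size - (allocatable_base - base) - base_bias_size,
        page_size);
    // size_: from the cage base to the end of the allocatable area,
    // i.e. the "cage size" span in the VirtualMemoryCage diagram.
    const uint64_t size = allocatable_base + allocatable_size - base;
    return size <= reservation_size ? 0 : 1;
  }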

View File

@@ -296,16 +296,18 @@ class VirtualMemory final {
 // ranges (on platforms that require code ranges) and are configurable via
 // ReservationParams.
 //
-// +------------+-----------+------------ ~~~ -+
-// |     ...    |    ...    |      ...         |
-// +------------+-----------+------------ ~~~ -+
+// +------------+-----------+------------ ~~~ --+- ~~~ -+
+// |     ...    |    ...    |       ...         |  ...  |
+// +------------+-----------+------------ ~~~ --+- ~~~ -+
 // ^            ^           ^
 // start        cage base   allocatable base
 //
 // <------------>           <------------------->
 // base bias size             allocatable size
-// <------------------------------------------->
-//               reservation size
+//              <------------------------------->
+//                         cage size
+// <---------------------------------------------------->
+//                    reservation size
 //
 // - The reservation is made using ReservationParams::page_allocator.
 // - start is the start of the virtual memory reservation.
@@ -313,9 +315,13 @@
 // - allocatable base is the cage base rounded up to the nearest
 //   ReservationParams::page_size, and is the start of the allocatable area for
 //   the BoundedPageAllocator.
+// - cage size is the size of the area from the cage base to the end of the
+//   allocatable area.
 //
 // - The base bias is configured by ReservationParams::base_bias_size.
-// - The reservation size is configured by ReservationParams::reservation_size.
+// - The reservation size is configured by ReservationParams::reservation_size,
+//   but it might actually be bigger if we end up over-reserving the virtual
+//   address space.
 //
 // Additionally,
 // - The alignment of the cage base is configured by
@@ -346,6 +352,7 @@ class VirtualMemoryCage {
   VirtualMemoryCage& operator=(VirtualMemoryCage&& other) V8_NOEXCEPT;
 
   Address base() const { return base_; }
+  size_t size() const { return size_; }
 
   base::BoundedPageAllocator* page_allocator() const {
     return page_allocator_.get();
@@ -356,6 +363,7 @@
   bool IsReserved() const {
     DCHECK_EQ(base_ != kNullAddress, reservation_.IsReserved());
+    DCHECK_EQ(base_ != kNullAddress, size_ != 0);
     return reservation_.IsReserved();
   }
@@ -386,6 +394,7 @@
  protected:
   Address base_ = kNullAddress;
+  size_t size_ = 0;
   std::unique_ptr<base::BoundedPageAllocator> page_allocator_;
   VirtualMemory reservation_;
 };