Revert "[ext-code-space] Change compression scheme for Code pointers"

This reverts commit 70e65f8961.

Reason for revert: Breaks roll (https://chromium-review.googlesource.com/c/chromium/src/+/4030636/6?checksPatchset=6&checksRunsSelected=chromeos-amd64-generic-rel&tab=checks)

Original change's description:
> [ext-code-space] Change compression scheme for Code pointers
>
> Unlike the default scheme the ExternalCodeCompressionScheme allows
> the cage to cross 4GB boundary at a price of making decompression
> slightly more complex. The former outweighs the latter because it
> gives us more flexibility in allocating the code range closer to
> the .text section in the process address space. At the same time
> decompression of the external code field happens relatively rarely
> during GC.
>
> Bug: v8:11880
> Change-Id: Ia62bedd318f88c2147534ff000ab9fad354777f3
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3899307
> Commit-Queue: Igor Sheludko <ishell@chromium.org>
> Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#84269}

Bug: v8:11880
Change-Id: I65607590dd12e92c741ccedf84ac3c6b2fcf075e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4031182
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Owners-Override: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/main@{#84335}
This commit is contained in:
Leszek Swirski 2022-11-17 15:59:12 +00:00 committed by V8 LUCI CQ
parent 76861e3005
commit e4077cc01d
9 changed files with 47 additions and 178 deletions

View File

@ -920,7 +920,6 @@ class CompressedMaybeObjectSlot;
class CompressedMapWordSlot;
class CompressedHeapObjectSlot;
class V8HeapCompressionScheme;
class ExternalCodeCompressionScheme;
template <typename CompressionScheme>
class OffHeapCompressedObjectSlot;
class FullObjectSlot;
@ -952,12 +951,7 @@ struct SlotTraits {
using THeapObjectSlot = CompressedHeapObjectSlot;
using TOffHeapObjectSlot =
OffHeapCompressedObjectSlot<V8HeapCompressionScheme>;
#ifdef V8_EXTERNAL_CODE_SPACE
using TCodeObjectSlot =
OffHeapCompressedObjectSlot<ExternalCodeCompressionScheme>;
#else
using TCodeObjectSlot = TObjectSlot;
#endif // V8_EXTERNAL_CODE_SPACE
using TCodeObjectSlot = OffHeapCompressedObjectSlot<V8HeapCompressionScheme>;
#else
using TObjectSlot = FullObjectSlot;
using TMaybeObjectSlot = FullMaybeObjectSlot;
@ -2088,7 +2082,7 @@ class PtrComprCageBase {
// NOLINTNEXTLINE
inline PtrComprCageBase(const LocalIsolate* isolate);
inline Address address() const { return address_; }
inline Address address() const;
bool operator==(const PtrComprCageBase& other) const {
return address_ == other.address_;

View File

@ -19,6 +19,13 @@ PtrComprCageBase::PtrComprCageBase(const Isolate* isolate)
PtrComprCageBase::PtrComprCageBase(const LocalIsolate* isolate)
: address_(isolate->cage_base()) {}
Address PtrComprCageBase::address() const {
Address ret = address_;
ret = reinterpret_cast<Address>(V8_ASSUME_ALIGNED(
reinterpret_cast<void*>(ret), kPtrComprCageBaseAlignment));
return ret;
}
//
// V8HeapCompressionScheme
//
@ -32,10 +39,7 @@ Address V8HeapCompressionScheme::GetPtrComprCageBaseAddress(
// static
Address V8HeapCompressionScheme::GetPtrComprCageBaseAddress(
PtrComprCageBase cage_base) {
Address base = cage_base.address();
base = reinterpret_cast<Address>(V8_ASSUME_ALIGNED(
reinterpret_cast<void*>(base), kPtrComprCageBaseAlignment));
return base;
return cage_base.address();
}
// static
@ -81,66 +85,6 @@ void V8HeapCompressionScheme::ProcessIntermediatePointers(
callback(decompressed_high);
}
#ifdef V8_EXTERNAL_CODE_SPACE
//
// ExternalCodeCompressionScheme
//
// static
Address ExternalCodeCompressionScheme::PrepareCageBaseAddress(
Address on_heap_addr) {
return RoundDown<kMinExpectedOSPageSize>(on_heap_addr);
}
// static
Address ExternalCodeCompressionScheme::GetPtrComprCageBaseAddress(
PtrComprCageBase cage_base) {
Address base = cage_base.address();
base = reinterpret_cast<Address>(
V8_ASSUME_ALIGNED(reinterpret_cast<void*>(base), kMinExpectedOSPageSize));
return base;
}
// static
Tagged_t ExternalCodeCompressionScheme::CompressTagged(Address tagged) {
return static_cast<Tagged_t>(static_cast<uint32_t>(tagged));
}
// static
Address ExternalCodeCompressionScheme::DecompressTaggedSigned(
Tagged_t raw_value) {
// For runtime code the upper 32-bits of the Smi value do not matter.
return static_cast<Address>(raw_value);
}
// static
template <typename TOnHeapAddress>
Address ExternalCodeCompressionScheme::DecompressTaggedPointer(
TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
Address cage_base = GetPtrComprCageBaseAddress(on_heap_addr);
Address diff = static_cast<Address>(static_cast<uint32_t>(raw_value)) -
static_cast<Address>(static_cast<uint32_t>(cage_base));
// The cage base value was chosen such that it's less than or equal to any
// pointer in the cage, thus if we got a negative diff then it means that
// the decompressed value is off by 4GB.
if (static_cast<intptr_t>(diff) < 0) {
diff += size_t{4} * GB;
}
DCHECK(is_uint32(diff));
return cage_base + diff;
}
// static
template <typename TOnHeapAddress>
Address ExternalCodeCompressionScheme::DecompressTaggedAny(
TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
if (HAS_SMI_TAG(raw_value)) return DecompressTaggedSigned(raw_value);
return DecompressTaggedPointer(on_heap_addr, raw_value);
}
#endif // V8_EXTERNAL_CODE_SPACE
//
// Misc functions.
//

View File

@ -50,71 +50,9 @@ class V8HeapCompressionScheme {
};
#ifdef V8_EXTERNAL_CODE_SPACE
// Compression scheme used for fields containing Code objects (namely for the
// CodeDataContainer::code field).
// Unlike the V8HeapCompressionScheme this one allows the cage to cross 4GB
// boundary at a price of making decompression slightly more complex.
// The former outweighs the latter because it gives us more flexibility in
// allocating the code range closer to .text section in the process address
// space. At the same time decompression of the external code field happens
// relatively rarely during GC.
// The base can be any value such that [base, base + 4GB) contains the whole
// code range.
//
// This scheme works as follows:
// --|----------{---------|------}--------------|--
// 4GB | 4GB | 4GB
// +-- code range --+
// |
// cage base
//
// * Cage base value is OS page aligned for simplicity (although it's not
// strictly necessary).
// * Code range size is smaller than or equal to 4GB.
// * Compression is just a truncation to 32-bits value.
// * Decompression of a pointer:
// - if "compressed" cage base is <= than compressed value then one just
// needs to OR the upper 32-bits of the cage base to get the decompressed
// value.
// - if compressed value is smaller than "compressed" cage base then ORing
// the upper 32-bits of the cage base is not enough because the resulting
// value will be off by 4GB, which has to be added to the result.
// - note that decompression doesn't modify the lower 32-bits of the value.
// * Decompression of Smi values is made a no-op for simplicity given that
// on the hot paths of decompressing the Code pointers it's already known
// that the value is not a Smi.
//
class ExternalCodeCompressionScheme {
public:
V8_INLINE static Address PrepareCageBaseAddress(Address on_heap_addr);
// Note that this compression scheme doesn't allow reconstruction of the cage
// base value from any arbitrary value, thus the cage base has to be passed
// explicitly to the decompression functions.
static Address GetPtrComprCageBaseAddress(Address on_heap_addr) = delete;
V8_INLINE static Address GetPtrComprCageBaseAddress(
PtrComprCageBase cage_base);
// Compresses full-pointer representation of a tagged value to on-heap
// representation.
V8_INLINE static Tagged_t CompressTagged(Address tagged);
// Decompresses smi value.
V8_INLINE static Address DecompressTaggedSigned(Tagged_t raw_value);
// Decompresses weak or strong heap object pointer or forwarding pointer,
// preserving both weak- and smi- tags.
template <typename TOnHeapAddress>
V8_INLINE static Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr,
Tagged_t raw_value);
// Decompresses any tagged value, preserving both weak- and smi- tags.
template <typename TOnHeapAddress>
V8_INLINE static Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
Tagged_t raw_value);
};
using ExternalCodeCompressionScheme = V8HeapCompressionScheme;
#endif // V8_EXTERNAL_CODE_SPACE
// Accessors for fields that may be unaligned due to pointer compression.

View File

@ -4285,14 +4285,14 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
}
#ifdef V8_EXTERNAL_CODE_SPACE
if (heap_.code_range()) {
code_cage_base_ = ExternalCodeCompressionScheme::PrepareCageBaseAddress(
code_cage_base_ = ExternalCodeCompressionScheme::GetPtrComprCageBaseAddress(
heap_.code_range()->base());
} else {
CHECK(jitless_);
// In jitless mode the code space pages will be allocated in the main
// pointer compression cage.
code_cage_base_ =
ExternalCodeCompressionScheme::PrepareCageBaseAddress(cage_base());
ExternalCodeCompressionScheme::GetPtrComprCageBaseAddress(cage_base());
}
#endif // V8_EXTERNAL_CODE_SPACE

View File

@ -108,6 +108,21 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
requested = kMinimumCodeRangeSize;
}
// When V8_EXTERNAL_CODE_SPACE_BOOL is enabled the allocatable region must
// not cross the 4Gb boundary and thus the default compression scheme of
// truncating the Code pointers to 32-bits still works. It's achieved by
// specifying base_alignment parameter.
// Note that the alignment is calculated before adjusting the requested size
// for GetWritableReservedAreaSize(). The reasons are:
// - this extra page is used by breakpad on Windows and it's allowed to cross
// the 4Gb boundary,
// - rounding up the adjusted size would result in requesting unnecessarily
// big alignment.
const size_t base_alignment =
V8_EXTERNAL_CODE_SPACE_BOOL
? base::bits::RoundUpToPowerOfTwo(requested)
: VirtualMemoryCage::ReservationParams::kAnyBaseAlignment;
const size_t reserved_area = GetWritableReservedAreaSize();
if (requested < (kMaximalCodeRangeSize - reserved_area)) {
requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
@ -122,8 +137,7 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
params.page_allocator = page_allocator;
params.reservation_size = requested;
const size_t allocate_page_size = page_allocator->AllocatePageSize();
params.base_alignment =
VirtualMemoryCage::ReservationParams::kAnyBaseAlignment;
params.base_alignment = base_alignment;
params.base_bias_size = RoundUp(reserved_area, allocate_page_size);
params.page_size = MemoryChunk::kPageSize;
params.requested_start_hint =
@ -136,14 +150,10 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
#ifdef V8_EXTERNAL_CODE_SPACE
// Ensure that ExternalCodeCompressionScheme is applicable to all objects
// stored in the code range.
using ComprScheme = ExternalCodeCompressionScheme;
Address base = page_allocator_->begin();
Address last = base + page_allocator_->size() - 1;
PtrComprCageBase code_cage_base{base};
CHECK_EQ(base, ComprScheme::DecompressTaggedPointer(
code_cage_base, ComprScheme::CompressTagged(base)));
CHECK_EQ(last, ComprScheme::DecompressTaggedPointer(
code_cage_base, ComprScheme::CompressTagged(last)));
CHECK_EQ(ExternalCodeCompressionScheme::GetPtrComprCageBaseAddress(base),
ExternalCodeCompressionScheme::GetPtrComprCageBaseAddress(last));
#endif // V8_EXTERNAL_CODE_SPACE
// On some platforms, specifically Win64, we need to reserve some pages at

View File

@ -3699,14 +3699,6 @@ MaybeObject MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::STRONG>(
return HeapObjectReference::Strong(heap_object);
}
#ifdef V8_EXTERNAL_CODE_SPACE
template <>
Object MakeSlotValue<CodeObjectSlot, HeapObjectReferenceType::STRONG>(
HeapObject heap_object) {
return heap_object;
}
#endif // V8_EXTERNAL_CODE_SPACE
// The following specialization
// MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::WEAK>()
// is not used.
@ -3721,10 +3713,9 @@ static inline void UpdateSlot(PtrComprCageBase cage_base, TSlot slot,
std::is_same<TSlot, ObjectSlot>::value ||
std::is_same<TSlot, FullMaybeObjectSlot>::value ||
std::is_same<TSlot, MaybeObjectSlot>::value ||
std::is_same<TSlot, OffHeapObjectSlot>::value ||
std::is_same<TSlot, CodeObjectSlot>::value,
"Only [Full|OffHeap]ObjectSlot, [Full]MaybeObjectSlot "
"or CodeObjectSlot are expected here");
std::is_same<TSlot, OffHeapObjectSlot>::value,
"Only [Full|OffHeap]ObjectSlot and [Full]MaybeObjectSlot are "
"expected here");
MapWord map_word = heap_obj.map_word(cage_base, kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
DCHECK_IMPLIES((!v8_flags.minor_mc && !Heap::InFromPage(heap_obj)),

View File

@ -1448,7 +1448,8 @@ Object CodeDataContainer::raw_code() const {
Object CodeDataContainer::raw_code(PtrComprCageBase cage_base) const {
#ifdef V8_EXTERNAL_CODE_SPACE
return ExternalCodeField<Object>::load(cage_base, *this);
Object value = ExternalCodeField::load(cage_base, *this);
return value;
#else
UNREACHABLE();
#endif // V8_EXTERNAL_CODE_SPACE
@ -1456,7 +1457,7 @@ Object CodeDataContainer::raw_code(PtrComprCageBase cage_base) const {
void CodeDataContainer::set_raw_code(Object value, WriteBarrierMode mode) {
#ifdef V8_EXTERNAL_CODE_SPACE
ExternalCodeField<Object>::Release_Store(*this, value);
ExternalCodeField::Release_Store(*this, value);
CONDITIONAL_WRITE_BARRIER(*this, kCodeOffset, value, mode);
#else
UNREACHABLE();
@ -1471,7 +1472,8 @@ Object CodeDataContainer::raw_code(RelaxedLoadTag tag) const {
Object CodeDataContainer::raw_code(PtrComprCageBase cage_base,
RelaxedLoadTag) const {
#ifdef V8_EXTERNAL_CODE_SPACE
return ExternalCodeField<Object>::Relaxed_Load(cage_base, *this);
Object value = ExternalCodeField::Relaxed_Load(cage_base, *this);
return value;
#else
UNREACHABLE();
#endif // V8_EXTERNAL_CODE_SPACE
@ -1485,7 +1487,7 @@ PtrComprCageBase CodeDataContainer::code_cage_base() const {
return PtrComprCageBase(isolate->code_cage_base());
#else
return GetPtrComprCageBase(*this);
#endif // V8_EXTERNAL_CODE_SPACE
#endif
}
Code CodeDataContainer::code() const {
@ -1493,12 +1495,11 @@ Code CodeDataContainer::code() const {
return CodeDataContainer::code(cage_base);
}
Code CodeDataContainer::code(PtrComprCageBase cage_base) const {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
#ifdef V8_EXTERNAL_CODE_SPACE
DCHECK(!is_off_heap_trampoline());
return ExternalCodeField<Code>::load(cage_base, *this);
#else
UNREACHABLE();
#endif // V8_EXTERNAL_CODE_SPACE
#endif
return Code::cast(raw_code(cage_base));
}
Code CodeDataContainer::code(RelaxedLoadTag tag) const {
@ -1508,12 +1509,8 @@ Code CodeDataContainer::code(RelaxedLoadTag tag) const {
Code CodeDataContainer::code(PtrComprCageBase cage_base,
RelaxedLoadTag tag) const {
#ifdef V8_EXTERNAL_CODE_SPACE
DCHECK(!is_off_heap_trampoline());
return ExternalCodeField<Code>::Relaxed_Load(cage_base, *this);
#else
UNREACHABLE();
#endif // V8_EXTERNAL_CODE_SPACE
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
return Code::cast(raw_code(cage_base, tag));
}
DEF_GETTER(CodeDataContainer, code_entry_point, Address) {

View File

@ -261,9 +261,8 @@ class CodeDataContainer : public HeapObject {
#undef CODE_DATA_FIELDS
#ifdef V8_EXTERNAL_CODE_SPACE
template <typename T>
using ExternalCodeField =
TaggedField<T, kCodeOffset, ExternalCodeCompressionScheme>;
TaggedField<Object, kCodeOffset, ExternalCodeCompressionScheme>;
#endif
class BodyDescriptor;

View File

@ -151,11 +151,7 @@ TEST_F(HeapTest, HeapLayout) {
EXPECT_TRUE(IsAligned(cage_base, size_t{4} * GB));
Address code_cage_base = i_isolate()->code_cage_base();
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
EXPECT_TRUE(IsAligned(code_cage_base, kMinExpectedOSPageSize));
} else {
EXPECT_TRUE(IsAligned(code_cage_base, size_t{4} * GB));
}
EXPECT_TRUE(IsAligned(code_cage_base, size_t{4} * GB));
#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
Address isolate_root = i_isolate()->isolate_root();