Revert "Reland "[ptr-cage] Turn on shared pointer cage by default for arm64 and x64""

This reverts commit 1f504c36da.

Reason for revert: Failures in Chromium tests causing a blocked roll - https://ci.chromium.org/ui/p/chromium/builders/try/linux_chromium_tsan_rel_ng/848645/overview

Original change's description:
> Reland "[ptr-cage] Turn on shared pointer cage by default for arm64 and x64"
>
> This is a reland of 6c65e858a6
>
> Changes since revert:
>
> - Add --disable-shared-ro-heap-for-testing flag that is implied by
>   d8's --stress-snapshot, since RO heap sharing currently
>   does not support deserializing/serializing while original Isolate
>   is still running
> - Add BUILD.gn assert that v8_enable_pointer_compression_shared_cage
>   requires x64 or arm64
>
> Original change's description:
> > [ptr-cage] Turn on shared pointer cage by default for arm64 and x64
> >
> > Bug: v8:11460
> > Change-Id: I9ab419b5e90fbe677e1d63b41699d90552839e98
> > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2873226
> > Commit-Queue: Shu-yu Guo <syg@chromium.org>
> > Reviewed-by: Igor Sheludko <ishell@chromium.org>
> > Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> > Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
> > Cr-Commit-Position: refs/heads/master@{#74422}
>
> Bug: v8:11460
> Change-Id: I7eb189b7f8ac3e30da96b0dadd2c085f08a1a0b1
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2878855
> Commit-Queue: Shu-yu Guo <syg@chromium.org>
> Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> Reviewed-by: Adam Klein <adamk@chromium.org>
> Reviewed-by: Igor Sheludko <ishell@chromium.org>
> Reviewed-by: Dan Elphick <delphick@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#74448}

Bug: v8:11460
Change-Id: Ice601be4826adbae1288f3314192bdf6566a366c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2883660
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Reviewed-by: Maya Lekova <mslekova@chromium.org>
Commit-Queue: Maya Lekova <mslekova@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74473}
This commit is contained in:
Maya Lekova 2021-05-10 12:48:14 +00:00
parent 3615301bed
commit 5848315425
10 changed files with 11 additions and 55 deletions

View File

@@ -352,7 +352,7 @@ if (v8_enable_pointer_compression == "") {
v8_current_cpu == "arm64" || v8_current_cpu == "x64"
}
if (v8_enable_pointer_compression_shared_cage == "") {
v8_enable_pointer_compression_shared_cage = v8_enable_pointer_compression
v8_enable_pointer_compression_shared_cage = false
}
if (v8_enable_fast_torque == "") {
v8_enable_fast_torque = v8_enable_fast_mksnapshot
@@ -425,8 +425,7 @@ assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations,
assert(v8_current_cpu == "arm64" || !v8_control_flow_integrity,
"Control-flow integrity is only supported on arm64")
if (v8_enable_shared_ro_heap && v8_enable_pointer_compression &&
!v8_enable_pointer_compression_shared_cage) {
if (v8_enable_shared_ro_heap && v8_enable_pointer_compression) {
assert(
is_linux || is_chromeos || is_android,
"Sharing read-only heap with pointer compression is only supported on Linux or Android")
@@ -448,10 +447,6 @@ assert(
!v8_enable_pointer_compression_shared_cage || v8_enable_pointer_compression,
"Can't share a pointer compression cage if pointers aren't compressed")
assert(!v8_enable_pointer_compression_shared_cage || v8_current_cpu == "x64" ||
v8_current_cpu == "arm64",
"Sharing a pointer compression cage is only supported on x64 and arm64")
assert(!v8_enable_unconditional_write_barriers || !v8_disable_write_barriers,
"Write barriers can't be both enabled and disabled")

View File

@@ -384,6 +384,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
// Initialize the pointer cage base register.
// TODO(syg): Actually make a cage.
__ LoadRootRelative(kPtrComprCageBaseRegister,
IsolateData::cage_base_offset());
#endif

View File

@@ -3985,11 +3985,6 @@ bool Shell::SetOptions(int argc, char* argv[]) {
// Note: This is not an issue in production because we don't clear SFI's
// there (that only happens in mksnapshot and in --stress-snapshot mode).
i::FLAG_incremental_marking = false;
// The RO heap also cannot be shared, as it serializes and deserializes
// while the original Isolate is still running.
//
// TODO(v8:11750): Decouple serialization and deserialization steps.
i::FLAG_disable_shared_ro_heap_for_testing = true;
argv[i] = nullptr;
} else if (strcmp(argv[i], "--nostress-snapshot") == 0 ||
strcmp(argv[i], "--no-stress-snapshot") == 0) {

View File

@@ -577,11 +577,6 @@ void UnregisterNonABICompliantCodeRange(void* start) {
ExceptionHandlerRecord* record =
reinterpret_cast<ExceptionHandlerRecord*>(start);
CHECK(::RtlDeleteFunctionTable(record->runtime_function));
// Unprotect reserved page.
DWORD old_protect;
CHECK(VirtualProtect(start, sizeof(ExceptionHandlerRecord),
PAGE_READWRITE, &old_protect));
}
#endif // V8_OS_WIN_X64
} else {
@@ -590,11 +585,6 @@ void UnregisterNonABICompliantCodeRange(void* start) {
if (record->dynamic_table) {
DeleteGrowableFunctionTable(record->dynamic_table);
}
// Unprotect reserved page.
DWORD old_protect;
CHECK(VirtualProtect(start, sizeof(CodeRangeUnwindingRecord),
PAGE_READWRITE, &old_protect));
}
}

View File

@@ -3076,8 +3076,7 @@ void Isolate::Deinit() {
#if defined(V8_OS_WIN64)
if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
heap()->memory_allocator() && RequiresCodeRange() &&
heap()->code_range()->AtomicDecrementUnwindInfoUseCount() == 1) {
heap()->memory_allocator() && RequiresCodeRange()) {
const base::AddressRegion& code_region = heap()->code_region();
void* start = reinterpret_cast<void*>(code_region.begin());
win64_unwindinfo::UnregisterNonABICompliantCodeRange(start);
@@ -3808,8 +3807,7 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
}
#if defined(V8_OS_WIN64)
if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
heap()->code_range()->AtomicIncrementUnwindInfoUseCount() == 0) {
if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
const base::AddressRegion& code_region = heap()->code_region();
void* start = reinterpret_cast<void*>(code_region.begin());
size_t size_in_bytes = code_region.size();

View File

@@ -377,9 +377,6 @@ DEFINE_BOOL(icu_timezone_data, true, "get information about timezones from ICU")
#define V8_SHARED_RO_HEAP_BOOL false
#endif
DEFINE_BOOL(disable_shared_ro_heap_for_testing, false,
"disables sharing of the read-only heap for testing")
DEFINE_BOOL(lite_mode, V8_LITE_BOOL,
"enables trade-off of performance for memory savings")

View File

@@ -88,21 +88,6 @@ class CodeRange final : public VirtualMemoryCage {
return embedded_blob_code_copy_.load(std::memory_order_relaxed);
}
#ifdef V8_OS_WIN64
// 64-bit Windows needs to track how many Isolates are using the CodeRange for
// registering and unregistering of unwind info. Note that even though
// CodeRanges are used with std::shared_ptr, std::shared_ptr::use_count should
// not be used for synchronization as it's usually implemented with a relaxed
// read.
uint32_t AtomicIncrementUnwindInfoUseCount() {
return unwindinfo_use_count_.fetch_add(1, std::memory_order_acq_rel);
}
uint32_t AtomicDecrementUnwindInfoUseCount() {
return unwindinfo_use_count_.fetch_sub(1, std::memory_order_acq_rel);
}
#endif // V8_OS_WIN64
bool InitReservation(v8::PageAllocator* page_allocator, size_t requested);
void Free();
@@ -120,6 +105,8 @@ class CodeRange final : public VirtualMemoryCage {
const uint8_t* embedded_blob_code,
size_t embedded_blob_code_size);
// Initializes the process-wide code range if RequiresProcessWideCodeRange()
// is true.
static void InitializeProcessWideCodeRangeOnce(
v8::PageAllocator* page_allocator, size_t requested_size);
@@ -135,10 +122,6 @@ class CodeRange final : public VirtualMemoryCage {
// When sharing a CodeRange among Isolates, calls to RemapEmbeddedBuiltins may
// race during Isolate::Init.
base::Mutex remap_embedded_builtins_mutex_;
#ifdef V8_OS_WIN64
std::atomic<uint32_t> unwindinfo_use_count_{0};
#endif
};
} // namespace internal

View File

@@ -894,8 +894,6 @@ class Heap {
const base::AddressRegion& code_region();
CodeRange* code_range() { return code_range_.get(); }
LocalHeap* main_thread_local_heap() { return main_thread_local_heap_; }
// ===========================================================================

View File

@@ -88,8 +88,7 @@ class ReadOnlyHeap {
// account whether shared memory is available with pointer compression.
static bool IsReadOnlySpaceShared() {
return V8_SHARED_RO_HEAP_BOOL &&
(!COMPRESS_POINTERS_BOOL || COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL) &&
!FLAG_disable_shared_ro_heap_for_testing;
(!COMPRESS_POINTERS_BOOL || COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL);
}
virtual void InitializeIsolateRoots(Isolate* isolate) {}

View File

@@ -500,9 +500,9 @@ KNOWN_OBJECTS = {
# Lower 32 bits of first page addresses for various heap spaces.
HEAP_FIRST_PAGES = {
0x080c0000: "old_space",
0x08100000: "map_space",
0x08000000: "read_only_space",
0x08100000: "old_space",
0x08140000: "map_space",
0x08040000: "read_only_space",
}
# List of known V8 Frame Markers.