Revert "Reland^3 "[ptr-cage] Turn on shared pointer cage by default for arm64 and x64"""
This reverts commit8b74fd4590
. Reason for revert: Blocking the roll into Chromium, example failure on Windows 64 bot: https://ci.chromium.org/p/chromium/builders/try/win10_chromium_x64_rel_ng/863189? Original change's description: > Reland^3 "[ptr-cage] Turn on shared pointer cage by default for arm64 and x64"" > > This is a reland of054ff044bc
> > Change since revert: > > - Remove assignment to FLAG_enable_short_builtins in test since > it's write-once in CFI. > > Original change's description: > > Reland^2 "[ptr-cage] Turn on shared pointer cage by default for arm64 and x64" > > > > This is a reland of1f504c36da
> > > > Changes since revert: > > > > - Removed disabling of RO heap sharing when --stress-snapshot is passed; > > was fixed byf4a6c628c9
> > - Fixed crashing tests that caused revert separately in > >a61aa4919f
> > > > Original change's description: > > > > [ptr-cage] Turn on shared pointer cage by default for arm64 and x64 > > > > > > > > Reviewed-on: > > > https://chromium-review.googlesource.com/c/v8/v8/+/2873226 > > > > Reviewed-by: Igor Sheludko <ishell@chromium.org> > > > > Reviewed-by: Dominik Inführ <dinfuehr@chromium.org> > > > > Reviewed-by: Ross McIlroy <rmcilroy@chromium.org> > > > > Cr-Commit-Position: refs/heads/master@{#74422} > > > > > > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2878855 > > > Reviewed-by: Dominik Inführ <dinfuehr@chromium.org> > > > Reviewed-by: Adam Klein <adamk@chromium.org> > > > Reviewed-by: Igor Sheludko <ishell@chromium.org> > > > Reviewed-by: Dan Elphick <delphick@chromium.org> > > > Cr-Commit-Position: refs/heads/master@{#74448} > > > > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2891460 > > Reviewed-by: Adam Klein <adamk@chromium.org> > > Commit-Queue: Shu-yu Guo <syg@chromium.org> > > Cr-Commit-Position: refs/heads/master@{#74546} > > TBR=adamk@chromium.org > > Bug: v8:11460 > Change-Id: Ib7526270d421a562cb00aec9a28b4fc2296e4a86 > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2893567 > Reviewed-by: Shu-yu Guo <syg@chromium.org> > Reviewed-by: Adam Klein <adamk@chromium.org> > Commit-Queue: Shu-yu Guo <syg@chromium.org> > Cr-Commit-Position: refs/heads/master@{#74548} Bug: v8:11460 Change-Id: Ie1a6a5d7e7928f6b90571a33dc743ca5d1d082b4 No-Presubmit: true No-Tree-Checks: true No-Try: true Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2893823 Auto-Submit: Bill Budge <bbudge@chromium.org> Commit-Queue: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com> Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com> Cr-Commit-Position: refs/heads/master@{#74557}
parent 231cdee871
commit 9ee5bdc975
BUILD.gn (9 lines changed):

@@ -352,7 +352,7 @@ if (v8_enable_pointer_compression == "") {
       v8_current_cpu == "arm64" || v8_current_cpu == "x64"
 }
 if (v8_enable_pointer_compression_shared_cage == "") {
-  v8_enable_pointer_compression_shared_cage = v8_enable_pointer_compression
+  v8_enable_pointer_compression_shared_cage = false
 }
 if (v8_enable_fast_torque == "") {
   v8_enable_fast_torque = v8_enable_fast_mksnapshot
@@ -425,8 +425,7 @@ assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations,
 assert(v8_current_cpu == "arm64" || !v8_control_flow_integrity,
        "Control-flow integrity is only supported on arm64")
 
-if (v8_enable_shared_ro_heap && v8_enable_pointer_compression &&
-    !v8_enable_pointer_compression_shared_cage) {
+if (v8_enable_shared_ro_heap && v8_enable_pointer_compression) {
   assert(
       is_linux || is_chromeos || is_android,
       "Sharing read-only heap with pointer compression is only supported on Linux or Android")
@@ -448,10 +447,6 @@ assert(
     !v8_enable_pointer_compression_shared_cage || v8_enable_pointer_compression,
     "Can't share a pointer compression cage if pointers aren't compressed")
 
-assert(!v8_enable_pointer_compression_shared_cage || v8_current_cpu == "x64" ||
-           v8_current_cpu == "arm64",
-       "Sharing a pointer compression cage is only supported on x64 and arm64")
-
 assert(!v8_enable_unconditional_write_barriers || !v8_disable_write_barriers,
        "Write barriers can't be both enabled and disabled")
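The first hunk is the heart of the revert: with it applied, v8_enable_pointer_compression_shared_cage no longer defaults to following v8_enable_pointer_compression, and the x64/arm64 assert guarding the shared cage is dropped along with the shared-cage exception for read-only heap sharing. As a sketch of what the flag selects between, assuming the GN flags surface as the usual preprocessor defines (V8_COMPRESS_POINTERS_IN_SHARED_CAGE appears verbatim in the Generate_JSEntryVariant hunk below):

// Sketch only: the cage configurations the GN flags above choose between.
#if defined(V8_COMPRESS_POINTERS)
#if defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE)
// One process-wide cage: every Isolate compresses pointers against the
// same base, so a single base register can serve all Isolates.
#else
// One cage per Isolate: each Isolate keeps its own compression base.
#endif
#else
// No pointer compression: heap pointers stay full-width.
#endif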
@@ -384,6 +384,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
 
 #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
   // Initialize the pointer cage base register.
+  // TODO(syg): Actually make a cage.
   __ LoadRootRelative(kPtrComprCageBaseRegister,
                       IsolateData::cage_base_offset());
 #endif
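The TODO being restored sits next to the load of kPtrComprCageBaseRegister, the register that makes compressed pointers cheap to widen again. A minimal sketch of the arithmetic, with illustrative names rather than V8's actual helpers:

#include <cstdint>

using Tagged_t = uint32_t;   // compressed on-heap pointer: a 32-bit offset
using Address = uintptr_t;   // full machine address

// With a shared cage the base is process-wide and lives in
// kPtrComprCageBaseRegister; with per-Isolate cages each Isolate
// supplies its own base.
inline Address Decompress(Address cage_base, Tagged_t compressed) {
  // Widening is a single add: offset within the cage plus the cage base.
  return cage_base + static_cast<Address>(compressed);
}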
@@ -577,11 +577,6 @@ void UnregisterNonABICompliantCodeRange(void* start) {
     ExceptionHandlerRecord* record =
         reinterpret_cast<ExceptionHandlerRecord*>(start);
     CHECK(::RtlDeleteFunctionTable(record->runtime_function));
-
-    // Unprotect reserved page.
-    DWORD old_protect;
-    CHECK(VirtualProtect(start, sizeof(ExceptionHandlerRecord),
-                         PAGE_READWRITE, &old_protect));
   }
 #endif  // V8_OS_WIN_X64
   } else {
@@ -590,11 +585,6 @@ void UnregisterNonABICompliantCodeRange(void* start) {
     if (record->dynamic_table) {
       DeleteGrowableFunctionTable(record->dynamic_table);
     }
-
-    // Unprotect reserved page.
-    DWORD old_protect;
-    CHECK(VirtualProtect(start, sizeof(CodeRangeUnwindingRecord),
-                         PAGE_READWRITE, &old_protect));
   }
 }
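The deleted lines look like the release half of a protect/unprotect pair: judging by the "Unprotect reserved page" comment, the reverted change write-protected the record page after registration, so unregistration had to flip it back before the memory could be reused. A sketch of that pairing under that assumption, with a simplified stand-in record type (Windows only):

#include <windows.h>
#include <cstdlib>

struct Record { char unwind_data[64]; };  // stand-in for the real records

void ProtectAfterRegister(void* start) {
  // Registration side (assumed): freeze the record once it is filled in.
  DWORD old_protect;
  if (!VirtualProtect(start, sizeof(Record), PAGE_READONLY, &old_protect))
    std::abort();
}

void UnprotectBeforeReuse(void* start) {
  // Unregistration side (the deleted lines above): make the page writable
  // again so the backing memory can be reused or freed.
  DWORD old_protect;
  if (!VirtualProtect(start, sizeof(Record), PAGE_READWRITE, &old_protect))
    std::abort();
}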
@@ -3101,8 +3101,7 @@ void Isolate::Deinit() {
 
 #if defined(V8_OS_WIN64)
   if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
-      heap()->memory_allocator() && RequiresCodeRange() &&
-      heap()->code_range()->AtomicDecrementUnwindInfoUseCount() == 1) {
+      heap()->memory_allocator() && RequiresCodeRange()) {
     const base::AddressRegion& code_region = heap()->code_region();
     void* start = reinterpret_cast<void*>(code_region.begin());
     win64_unwindinfo::UnregisterNonABICompliantCodeRange(start);
@@ -3832,8 +3831,7 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
   }
 
 #if defined(V8_OS_WIN64)
-  if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
-      heap()->code_range()->AtomicIncrementUnwindInfoUseCount() == 0) {
+  if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
     const base::AddressRegion& code_region = heap()->code_region();
     void* start = reinterpret_cast<void*>(code_region.begin());
     size_t size_in_bytes = code_region.size();
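The two conditions being deleted implement first-in/last-out gating on the shared CodeRange: fetch_add returning 0 marks the first Isolate to attach (it registers the unwind info), and fetch_sub returning 1 marks the last to detach (it unregisters). A standalone sketch of that pattern, not the V8 code itself:

#include <atomic>
#include <cstdint>

std::atomic<uint32_t> unwindinfo_use_count{0};

void AttachIsolate() {
  // fetch_add returns the value before the increment, so exactly one
  // caller, the first, sees 0 and performs the registration.
  if (unwindinfo_use_count.fetch_add(1, std::memory_order_acq_rel) == 0) {
    // RegisterNonABICompliantCodeRange(start, size);
  }
}

void DetachIsolate() {
  // fetch_sub returns the value before the decrement, so exactly one
  // caller, the last, sees 1 and performs the unregistration.
  if (unwindinfo_use_count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
    // UnregisterNonABICompliantCodeRange(start);
  }
}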
@@ -88,21 +88,6 @@ class CodeRange final : public VirtualMemoryCage {
     return embedded_blob_code_copy_.load(std::memory_order_acquire);
   }
 
-#ifdef V8_OS_WIN64
-  // 64-bit Windows needs to track how many Isolates are using the CodeRange for
-  // registering and unregistering of unwind info. Note that even though
-  // CodeRanges are used with std::shared_ptr, std::shared_ptr::use_count should
-  // not be used for synchronization as it's usually implemented with a relaxed
-  // read.
-  uint32_t AtomicIncrementUnwindInfoUseCount() {
-    return unwindinfo_use_count_.fetch_add(1, std::memory_order_acq_rel);
-  }
-
-  uint32_t AtomicDecrementUnwindInfoUseCount() {
-    return unwindinfo_use_count_.fetch_sub(1, std::memory_order_acq_rel);
-  }
-#endif  // V8_OS_WIN64
-
   bool InitReservation(v8::PageAllocator* page_allocator, size_t requested);
 
   void Free();
@@ -120,12 +105,14 @@ class CodeRange final : public VirtualMemoryCage {
                              const uint8_t* embedded_blob_code,
                              size_t embedded_blob_code_size);
 
+  // Initializes the process-wide code range if RequiresProcessWideCodeRange()
+  // is true.
   static void InitializeProcessWideCodeRangeOnce(
       v8::PageAllocator* page_allocator, size_t requested_size);
 
   // If InitializeProcessWideCodeRangeOnce has been called, returns the
   // initialized CodeRange. Otherwise returns an empty std::shared_ptr.
-  V8_EXPORT_PRIVATE static std::shared_ptr<CodeRange> GetProcessWideCodeRange();
+  static std::shared_ptr<CodeRange> GetProcessWideCodeRange();
 
  private:
   // Used when short builtin calls are enabled, where embedded builtins are
@@ -135,10 +122,6 @@ class CodeRange final : public VirtualMemoryCage {
   // When sharing a CodeRange among Isolates, calls to RemapEmbeddedBuiltins may
   // race during Isolate::Init.
   base::Mutex remap_embedded_builtins_mutex_;
-
-#ifdef V8_OS_WIN64
-  std::atomic<uint32_t> unwindinfo_use_count_{0};
-#endif
 };
 
 }  // namespace internal
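The restored comments describe a once-initialized process singleton: InitializeProcessWideCodeRangeOnce creates the shared CodeRange on first call, and GetProcessWideCodeRange returns it, or an empty shared_ptr if initialization never ran. A minimal sketch of that shape with illustrative names; std::call_once stands in for whatever once-primitive V8 actually uses:

#include <memory>
#include <mutex>

class FakeCodeRange {};  // stand-in for v8::internal::CodeRange

std::once_flag g_code_range_once;
std::shared_ptr<FakeCodeRange> g_process_wide_code_range;

void InitializeProcessWideOnce() {
  // Safe to race from several Isolate initializations: only the first
  // call constructs the shared range.
  std::call_once(g_code_range_once, [] {
    g_process_wide_code_range = std::make_shared<FakeCodeRange>();
  });
}

std::shared_ptr<FakeCodeRange> GetProcessWide() {
  // Empty if InitializeProcessWideOnce was never called, matching the
  // header comment above.
  return g_process_wide_code_range;
}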
@@ -894,8 +894,6 @@ class Heap {
 
   const base::AddressRegion& code_region();
 
-  CodeRange* code_range() { return code_range_.get(); }
-
   LocalHeap* main_thread_local_heap() { return main_thread_local_heap_; }
 
   // ===========================================================================
@@ -106,7 +106,8 @@ UNINITIALIZED_TEST(SharedPtrComprCageRemappedBuiltinsJitlessFalseToTrue) {
   // builtins. Toggling jitless from false to true with shared pointer
   // compression cage is not supported.
 
-  if (!V8_SHORT_BUILTIN_CALLS_BOOL || !FLAG_short_builtin_calls) return;
+  if (!V8_SHORT_BUILTIN_CALLS_BOOL) return;
+  FLAG_short_builtin_calls = true;
   FLAG_jitless = false;
 
   constexpr uint64_t kMemoryGB = 4;
@@ -500,9 +500,9 @@ KNOWN_OBJECTS = {
 
 # Lower 32 bits of first page addresses for various heap spaces.
 HEAP_FIRST_PAGES = {
-  0x080c0000: "old_space",
-  0x08100000: "map_space",
-  0x08000000: "read_only_space",
+  0x08100000: "old_space",
+  0x08140000: "map_space",
+  0x08040000: "read_only_space",
 }
 
 # List of known V8 Frame Markers.