From 250525be1deb6dd577987beb6a8b5c49f78178b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marja=20H=C3=B6ltt=C3=A4?= Date: Tue, 20 Dec 2022 11:00:07 +0100 Subject: [PATCH 001/654] [rab/gsab] Fix ValueSerializer error handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Error mode: a TypedArray backed by RAB claims it's not backed by RAB. Drive-by: disable resizability even harder when --harmony-rab-gsab is not on. Bug: v8:11111, chromium:1402139 Change-Id: I937c69f6124419cc8d29da0195686bc3b9a5c281 Fixed: chromium:1402139 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4110751 Reviewed-by: Shu-yu Guo Commit-Queue: Marja Hölttä Cr-Commit-Position: refs/heads/main@{#84952} --- src/objects/value-serializer.cc | 18 +++++++++++++++--- test/mjsunit/regress/regress-crbug-1402139.js | 13 +++++++++++++ 2 files changed, 28 insertions(+), 3 deletions(-) create mode 100644 test/mjsunit/regress/regress-crbug-1402139.js diff --git a/src/objects/value-serializer.cc b/src/objects/value-serializer.cc index 725851b62c..29bb93ece9 100644 --- a/src/objects/value-serializer.cc +++ b/src/objects/value-serializer.cc @@ -2162,10 +2162,17 @@ bool ValueDeserializer::ValidateJSArrayBufferViewFlags( // TODO(marja): When the version number is bumped the next time, check that // serialized_flags doesn't contain spurious 1-bits. + if (!v8_flags.harmony_rab_gsab) { + // Disable resizability. This ensures that no resizable buffers are + // created in a version which has the harmony_rab_gsab turned off, even if + // such a version is reading data containing resizable buffers from disk. + is_length_tracking = false; + is_backed_by_rab = false; + // The resizability of the buffer was already disabled. 
+ CHECK(!buffer.is_resizable_by_js()); + } + if (is_backed_by_rab || is_length_tracking) { - if (!v8_flags.harmony_rab_gsab) { - return false; - } if (!buffer.is_resizable_by_js()) { return false; } @@ -2173,6 +2180,11 @@ bool ValueDeserializer::ValidateJSArrayBufferViewFlags( return false; } } + // The RAB-ness of the buffer and the TA's "is_backed_by_rab" need to be in + // sync. + if (buffer.is_resizable_by_js() && !buffer.is_shared() && !is_backed_by_rab) { + return false; + } return true; } diff --git a/test/mjsunit/regress/regress-crbug-1402139.js b/test/mjsunit/regress/regress-crbug-1402139.js new file mode 100644 index 0000000000..ce2ca5eef5 --- /dev/null +++ b/test/mjsunit/regress/regress-crbug-1402139.js @@ -0,0 +1,13 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// Flags: --harmony-rab-gsab + +const rab = new ArrayBuffer(363, {"maxByteLength": 1000}); +const ta = new Uint8Array(rab); +rab.resize(80); +const data = d8.serializer.serialize(ta); +const dataArray = new Uint8Array(data); +dataArray[dataArray.length - 1] = 17; +assertThrows(() => { d8.serializer.deserialize(data); }); From dca1fbd13b8764bd41b8cb6bd71e86b389791246 Mon Sep 17 00:00:00 2001 From: Jakob Linke Date: Tue, 20 Dec 2022 07:45:54 +0100 Subject: [PATCH 002/654] [compiler] Extend --trace-opt output - Add timing infos to TraceAbortedJob output since aborted jobs still take time and resources: [aborted optimizing ... - took 0.005, 17.757, 0.061 ms] - Add the calling code kind to 'not marking ... for optimization' since it matters which tier we're currently stuck in: [not marking function ... 
(MAGLEV) for optimization: already queued] Bug: v8:7700 Change-Id: I9a123ddaa58ed310605cd28473f53ce8ea004fd3 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4110747 Reviewed-by: Toon Verwaest Auto-Submit: Jakob Linke Commit-Queue: Toon Verwaest Cr-Commit-Position: refs/heads/main@{#84953} --- src/codegen/compiler.cc | 37 ++++++++++++++++++++------------ src/codegen/compiler.h | 10 +++++++++ src/execution/tiering-manager.cc | 8 +++---- 3 files changed, 37 insertions(+), 18 deletions(-) diff --git a/src/codegen/compiler.cc b/src/codegen/compiler.cc index 4b0cd12437..187a46d510 100644 --- a/src/codegen/compiler.cc +++ b/src/codegen/compiler.cc @@ -202,13 +202,13 @@ class CompilerTracer : public AllStatic { static void TraceFinishMaglevCompile(Isolate* isolate, Handle function, - double ms_prepare, double ms_optimize, - double ms_codegen) { + double ms_prepare, double ms_execute, + double ms_finalize) { if (!v8_flags.trace_opt) return; CodeTracer::Scope scope(isolate->GetCodeTracer()); PrintTracePrefix(scope, "completed compiling", function, CodeKind::MAGLEV); PrintF(scope.file(), " - took %0.3f, %0.3f, %0.3f ms", ms_prepare, - ms_optimize, ms_codegen); + ms_execute, ms_finalize); PrintTraceSuffix(scope); } @@ -231,14 +231,17 @@ class CompilerTracer : public AllStatic { PrintTraceSuffix(scope); } - static void TraceAbortedJob(Isolate* isolate, - OptimizedCompilationInfo* info) { + static void TraceAbortedJob(Isolate* isolate, OptimizedCompilationInfo* info, + double ms_prepare, double ms_execute, + double ms_finalize) { if (!v8_flags.trace_opt) return; CodeTracer::Scope scope(isolate->GetCodeTracer()); PrintTracePrefix(scope, "aborted optimizing", info); if (info->is_osr()) PrintF(scope.file(), " OSR"); PrintF(scope.file(), " because: %s", GetBailoutReason(info->bailout_reason())); + PrintF(scope.file(), " - took %0.3f, %0.3f, %0.3f ms", ms_prepare, + ms_execute, ms_finalize); PrintTraceSuffix(scope); } @@ -1018,7 +1021,9 @@ bool 
CompileTurbofan_NotConcurrent(Isolate* isolate, if (!PrepareJobWithHandleScope(job, isolate, compilation_info, ConcurrencyMode::kSynchronous)) { - CompilerTracer::TraceAbortedJob(isolate, compilation_info); + CompilerTracer::TraceAbortedJob(isolate, compilation_info, + job->prepare_in_ms(), job->execute_in_ms(), + job->finalize_in_ms()); return false; } @@ -1028,13 +1033,17 @@ bool CompileTurbofan_NotConcurrent(Isolate* isolate, if (job->ExecuteJob(isolate->counters()->runtime_call_stats(), isolate->main_thread_local_isolate())) { UnparkedScope unparked_scope(isolate->main_thread_local_isolate()); - CompilerTracer::TraceAbortedJob(isolate, compilation_info); + CompilerTracer::TraceAbortedJob( + isolate, compilation_info, job->prepare_in_ms(), job->execute_in_ms(), + job->finalize_in_ms()); return false; } } if (job->FinalizeJob(isolate) != CompilationJob::SUCCEEDED) { - CompilerTracer::TraceAbortedJob(isolate, compilation_info); + CompilerTracer::TraceAbortedJob(isolate, compilation_info, + job->prepare_in_ms(), job->execute_in_ms(), + job->finalize_in_ms()); return false; } @@ -3948,7 +3957,9 @@ void Compiler::FinalizeTurbofanCompilationJob(TurbofanCompilationJob* job, } DCHECK_EQ(job->state(), CompilationJob::State::kFailed); - CompilerTracer::TraceAbortedJob(isolate, compilation_info); + CompilerTracer::TraceAbortedJob(isolate, compilation_info, + job->prepare_in_ms(), job->execute_in_ms(), + job->finalize_in_ms()); if (V8_LIKELY(use_result)) { ResetTieringState(*function, osr_offset); if (!IsOSR(osr_offset)) { @@ -3994,11 +4005,9 @@ void Compiler::FinalizeMaglevCompilationJob(maglev::MaglevCompilationJob* job, RecordMaglevFunctionCompilation(isolate, function); job->RecordCompilationStats(isolate); - double ms_prepare = job->time_taken_to_prepare().InMillisecondsF(); - double ms_optimize = job->time_taken_to_execute().InMillisecondsF(); - double ms_codegen = job->time_taken_to_finalize().InMillisecondsF(); - CompilerTracer::TraceFinishMaglevCompile(isolate, 
function, ms_prepare, - ms_optimize, ms_codegen); + CompilerTracer::TraceFinishMaglevCompile( + isolate, function, job->prepare_in_ms(), job->execute_in_ms(), + job->finalize_in_ms()); } #endif } diff --git a/src/codegen/compiler.h b/src/codegen/compiler.h index 32ff1261b3..21ee79145a 100644 --- a/src/codegen/compiler.h +++ b/src/codegen/compiler.h @@ -384,6 +384,16 @@ class OptimizedCompilationJob : public CompilationJob { const char* compiler_name() const { return compiler_name_; } + double prepare_in_ms() const { + return time_taken_to_prepare_.InMillisecondsF(); + } + double execute_in_ms() const { + return time_taken_to_execute_.InMillisecondsF(); + } + double finalize_in_ms() const { + return time_taken_to_finalize_.InMillisecondsF(); + } + protected: // Overridden by the actual implementation. virtual Status PrepareJobImpl(Isolate* isolate) = 0; diff --git a/src/execution/tiering-manager.cc b/src/execution/tiering-manager.cc index d34777f74c..cba7722a29 100644 --- a/src/execution/tiering-manager.cc +++ b/src/execution/tiering-manager.cc @@ -96,10 +96,10 @@ static_assert(sizeof(OptimizationDecision) <= kInt32Size); namespace { -void TraceInOptimizationQueue(JSFunction function) { +void TraceInOptimizationQueue(JSFunction function, CodeKind calling_code_kind) { if (v8_flags.trace_opt_verbose) { - PrintF("[not marking function %s for optimization: already queued]\n", - function.DebugNameCStr().get()); + PrintF("[not marking function %s (%s) for optimization: already queued]\n", + function.DebugNameCStr().get(), CodeKindToString(calling_code_kind)); } } @@ -281,7 +281,7 @@ void TieringManager::MaybeOptimizeFrame(JSFunction function, V8_UNLIKELY(IsInProgress(osr_tiering_state))) { // Note: This effectively disables OSR for the function while it is being // compiled. 
- TraceInOptimizationQueue(function); + TraceInOptimizationQueue(function, calling_code_kind); return; } From 605e46479aca3449a6ba1350a1de7927c76b86ad Mon Sep 17 00:00:00 2001 From: Jakob Linke Date: Tue, 20 Dec 2022 12:54:01 +0100 Subject: [PATCH 003/654] [centry] Remove the unused SaveFPRegsMode parameter The SaveFPRegsMode::kSave specializations of CEntry were unused. Remove this parameter to eliminate dead code. Bug: v8:13606 Change-Id: If3d6f5382101acd477c5d9559a84c88b02a72123 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4114283 Commit-Queue: Jakob Linke Reviewed-by: Toon Verwaest Cr-Commit-Position: refs/heads/main@{#84954} --- src/builtins/arm/builtins-arm.cc | 15 ++--- src/builtins/arm64/builtins-arm64.cc | 17 +++--- src/builtins/builtins-definitions.h | 17 ++---- src/builtins/builtins-internal-gen.cc | 51 +++++------------ src/builtins/builtins.cc | 2 +- src/builtins/builtins.h | 3 +- src/builtins/ia32/builtins-ia32.cc | 13 ++--- src/builtins/x64/builtins-x64.cc | 13 ++--- src/codegen/arm/macro-assembler-arm.cc | 32 +++-------- src/codegen/arm/macro-assembler-arm.h | 19 +++---- src/codegen/arm64/macro-assembler-arm64.cc | 45 ++------------- src/codegen/arm64/macro-assembler-arm64.h | 66 ++++++++++------------ src/codegen/code-factory.cc | 55 +++++------------- src/codegen/code-factory.h | 7 +-- src/codegen/ia32/macro-assembler-ia32.cc | 46 ++++----------- src/codegen/ia32/macro-assembler-ia32.h | 19 +++---- src/codegen/x64/macro-assembler-x64.cc | 49 ++++------------ src/codegen/x64/macro-assembler-x64.h | 20 +++---- src/common/globals.h | 5 +- src/compiler/js-call-reducer.cc | 4 +- src/compiler/js-graph.cc | 23 ++++---- src/compiler/js-graph.h | 6 +- src/compiler/js-typed-lowering.cc | 4 +- src/compiler/wasm-compiler.cc | 3 +- src/debug/debug-evaluate.cc | 16 ++---- 25 files changed, 182 insertions(+), 368 deletions(-) diff --git a/src/builtins/arm/builtins-arm.cc b/src/builtins/arm/builtins-arm.cc index 8749044af0..2c9c94449a 100644 
--- a/src/builtins/arm/builtins-arm.cc +++ b/src/builtins/arm/builtins-arm.cc @@ -2727,8 +2727,7 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, - SaveFPRegsMode save_doubles, ArgvMode argv_mode, - bool builtin_exit_frame) { + ArgvMode argv_mode, bool builtin_exit_frame) { // Called from JavaScript; parameters are on stack as if calling JS function. // r0: number of arguments including receiver // r1: pointer to builtin function @@ -2753,8 +2752,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // Enter the exit frame that transitions from JavaScript to C++. FrameScope scope(masm, StackFrame::MANUAL); __ EnterExitFrame( - save_doubles == SaveFPRegsMode::kSave, 0, - builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); + 0, builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); // Store a copy of argc in callee-saved registers for later. __ mov(r4, Operand(r0)); @@ -2815,7 +2813,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, ? no_reg // Callee-saved register r4 still holds argc. : r4; - __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc); + __ LeaveExitFrame(argc, false); __ mov(pc, lr); // Handling of exception. @@ -3050,7 +3048,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, DCHECK_EQ(stack_space, 0); __ ldr(r4, *stack_space_operand); } - __ LeaveExitFrame(false, r4, stack_space_operand != nullptr); + __ LeaveExitFrame(r4, stack_space_operand != nullptr); // Check if the function scheduled an exception. __ LoadRoot(r4, RootIndex::kTheHoleValue); @@ -3155,9 +3153,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // Allocate the v8::Arguments structure in the arguments' space since // it's not controlled by GC. 
static constexpr int kApiStackSpace = 4; - static constexpr bool kDontSaveDoubles = false; FrameScope frame_scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace); + __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT); // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above). // Arguments are after the return address (pushed by EnterExitFrame()). @@ -3238,7 +3235,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { const int kApiStackSpace = 1; FrameScope frame_scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(false, kApiStackSpace); + __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT); // Create v8::PropertyCallbackInfo object on the stack and initialize // it's args_ field. diff --git a/src/builtins/arm64/builtins-arm64.cc b/src/builtins/arm64/builtins-arm64.cc index fa539031d3..2c3c2e1d6b 100644 --- a/src/builtins/arm64/builtins-arm64.cc +++ b/src/builtins/arm64/builtins-arm64.cc @@ -4751,8 +4751,7 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, - SaveFPRegsMode save_doubles, ArgvMode argv_mode, - bool builtin_exit_frame) { + ArgvMode argv_mode, bool builtin_exit_frame) { // The Abort mechanism relies on CallRuntime, which in turn relies on // CEntry, so until this stub has been generated, we have to use a // fall-back Abort mechanism. @@ -4808,7 +4807,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // Enter the exit frame. FrameScope scope(masm, StackFrame::MANUAL); __ EnterExitFrame( - save_doubles == SaveFPRegsMode::kSave, x10, extra_stack_space, + x10, extra_stack_space, builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); // Poke callee-saved registers into reserved space. 
@@ -4889,7 +4888,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ Peek(argc, 2 * kSystemPointerSize); __ Peek(target, 3 * kSystemPointerSize); - __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, x10, x9); + __ LeaveExitFrame(x10, x9); if (argv_mode == ArgvMode::kStack) { // Drop the remaining stack slots and return from the stub. __ DropArguments(x11); @@ -5137,7 +5136,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, __ Ldr(x19, *stack_space_operand); } - __ LeaveExitFrame(false, x1, x5); + __ LeaveExitFrame(x1, x5); // Check if the function scheduled an exception. __ Mov(x5, ExternalReference::scheduled_exception_address(isolate)); @@ -5249,11 +5248,10 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // Allocate the v8::Arguments structure in the arguments' space, since it's // not controlled by GC. static constexpr int kApiStackSpace = 4; - static constexpr bool kDontSaveDoubles = false; FrameScope frame_scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(kDontSaveDoubles, x10, - kApiStackSpace + kCallApiFunctionSpillSpace); + __ EnterExitFrame(x10, kApiStackSpace + kCallApiFunctionSpillSpace, + StackFrame::EXIT); // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above). // Arguments are after the return address (pushed by EnterExitFrame()). @@ -5349,7 +5347,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { const int kApiStackSpace = 1; FrameScope frame_scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace); + __ EnterExitFrame(x10, kApiStackSpace + kCallApiFunctionSpillSpace, + StackFrame::EXIT); // Create v8::PropertyCallbackInfo object on the stack and initialize // it's args_ field. 
diff --git a/src/builtins/builtins-definitions.h b/src/builtins/builtins-definitions.h index 65d57b1ea0..e0da30ba8b 100644 --- a/src/builtins/builtins-definitions.h +++ b/src/builtins/builtins-definitions.h @@ -1072,17 +1072,12 @@ namespace internal { TFJ(AsyncIteratorValueUnwrap, kJSArgcReceiverSlots + 1, kReceiver, kValue) \ \ /* CEntry */ \ - ASM(CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit, CEntryDummy) \ - ASM(CEntry_Return1_DontSaveFPRegs_ArgvOnStack_BuiltinExit, \ - CEntry1ArgvOnStack) \ - ASM(CEntry_Return1_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit, CEntryDummy) \ - ASM(CEntry_Return1_SaveFPRegs_ArgvOnStack_NoBuiltinExit, CEntryDummy) \ - ASM(CEntry_Return1_SaveFPRegs_ArgvOnStack_BuiltinExit, CEntryDummy) \ - ASM(CEntry_Return2_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit, CEntryDummy) \ - ASM(CEntry_Return2_DontSaveFPRegs_ArgvOnStack_BuiltinExit, CEntryDummy) \ - ASM(CEntry_Return2_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit, CEntryDummy) \ - ASM(CEntry_Return2_SaveFPRegs_ArgvOnStack_NoBuiltinExit, CEntryDummy) \ - ASM(CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit, CEntryDummy) \ + ASM(CEntry_Return1_ArgvInRegister_NoBuiltinExit, CEntryDummy) \ + ASM(CEntry_Return1_ArgvOnStack_BuiltinExit, CEntry1ArgvOnStack) \ + ASM(CEntry_Return1_ArgvOnStack_NoBuiltinExit, CEntryDummy) \ + ASM(CEntry_Return2_ArgvInRegister_NoBuiltinExit, CEntryDummy) \ + ASM(CEntry_Return2_ArgvOnStack_BuiltinExit, CEntryDummy) \ + ASM(CEntry_Return2_ArgvOnStack_NoBuiltinExit, CEntryDummy) \ ASM(DirectCEntry, CEntryDummy) \ \ /* String helpers */ \ diff --git a/src/builtins/builtins-internal-gen.cc b/src/builtins/builtins-internal-gen.cc index ced3e9f85c..81090d7ba1 100644 --- a/src/builtins/builtins-internal-gen.cc +++ b/src/builtins/builtins-internal-gen.cc @@ -1230,9 +1230,8 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) { Int32Constant(BuiltinExitFrameConstants::kNumExtraArgsWithoutReceiver)); const bool builtin_exit_frame = true; - TNode code = - 
HeapConstant(CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore, - ArgvMode::kStack, builtin_exit_frame)); + TNode code = HeapConstant( + CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame)); // Unconditionally push argc, target and new target as extra stack arguments. // They will be used by stack frame iterators when constructing stack trace. @@ -1304,56 +1303,34 @@ TF_BUILTIN(AbortCSADcheck, CodeStubAssembler) { TailCallRuntime(Runtime::kAbortCSADcheck, NoContextConstant(), message); } -void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit( +void Builtins::Generate_CEntry_Return1_ArgvOnStack_NoBuiltinExit( MacroAssembler* masm) { - Generate_CEntry(masm, 1, SaveFPRegsMode::kIgnore, ArgvMode::kStack, false); + Generate_CEntry(masm, 1, ArgvMode::kStack, false); } -void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_BuiltinExit( +void Builtins::Generate_CEntry_Return1_ArgvOnStack_BuiltinExit( MacroAssembler* masm) { - Generate_CEntry(masm, 1, SaveFPRegsMode::kIgnore, ArgvMode::kStack, true); + Generate_CEntry(masm, 1, ArgvMode::kStack, true); } -void Builtins:: - Generate_CEntry_Return1_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit( - MacroAssembler* masm) { - Generate_CEntry(masm, 1, SaveFPRegsMode::kIgnore, ArgvMode::kRegister, false); -} - -void Builtins::Generate_CEntry_Return1_SaveFPRegs_ArgvOnStack_NoBuiltinExit( +void Builtins::Generate_CEntry_Return1_ArgvInRegister_NoBuiltinExit( MacroAssembler* masm) { - Generate_CEntry(masm, 1, SaveFPRegsMode::kSave, ArgvMode::kStack, false); + Generate_CEntry(masm, 1, ArgvMode::kRegister, false); } -void Builtins::Generate_CEntry_Return1_SaveFPRegs_ArgvOnStack_BuiltinExit( +void Builtins::Generate_CEntry_Return2_ArgvOnStack_NoBuiltinExit( MacroAssembler* masm) { - Generate_CEntry(masm, 1, SaveFPRegsMode::kSave, ArgvMode::kStack, true); + Generate_CEntry(masm, 2, ArgvMode::kStack, false); } -void 
Builtins::Generate_CEntry_Return2_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit( +void Builtins::Generate_CEntry_Return2_ArgvOnStack_BuiltinExit( MacroAssembler* masm) { - Generate_CEntry(masm, 2, SaveFPRegsMode::kIgnore, ArgvMode::kStack, false); + Generate_CEntry(masm, 2, ArgvMode::kStack, true); } -void Builtins::Generate_CEntry_Return2_DontSaveFPRegs_ArgvOnStack_BuiltinExit( +void Builtins::Generate_CEntry_Return2_ArgvInRegister_NoBuiltinExit( MacroAssembler* masm) { - Generate_CEntry(masm, 2, SaveFPRegsMode::kIgnore, ArgvMode::kStack, true); -} - -void Builtins:: - Generate_CEntry_Return2_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit( - MacroAssembler* masm) { - Generate_CEntry(masm, 2, SaveFPRegsMode::kIgnore, ArgvMode::kRegister, false); -} - -void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_NoBuiltinExit( - MacroAssembler* masm) { - Generate_CEntry(masm, 2, SaveFPRegsMode::kSave, ArgvMode::kStack, false); -} - -void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit( - MacroAssembler* masm) { - Generate_CEntry(masm, 2, SaveFPRegsMode::kSave, ArgvMode::kStack, true); + Generate_CEntry(masm, 2, ArgvMode::kRegister, false); } #if !defined(V8_TARGET_ARCH_ARM) diff --git a/src/builtins/builtins.cc b/src/builtins/builtins.cc index 07b598f79f..fcf9e8f1f0 100644 --- a/src/builtins/builtins.cc +++ b/src/builtins/builtins.cc @@ -570,7 +570,7 @@ bool Builtins::CodeObjectIsExecutable(Builtin builtin) { // TODO(delphick): Remove this when calls to it have the trampoline inlined // or are converted to use kCallBuiltinPointer. 
- case Builtin::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit: + case Builtin::kCEntry_Return1_ArgvOnStack_NoBuiltinExit: return true; default: #if V8_TARGET_ARCH_MIPS64 diff --git a/src/builtins/builtins.h b/src/builtins/builtins.h index 8924c36f5a..8098a0e64c 100644 --- a/src/builtins/builtins.h +++ b/src/builtins/builtins.h @@ -220,8 +220,7 @@ class Builtins { static void Generate_Adaptor(MacroAssembler* masm, Address builtin_address); static void Generate_CEntry(MacroAssembler* masm, int result_size, - SaveFPRegsMode save_doubles, ArgvMode argv_mode, - bool builtin_exit_frame); + ArgvMode argv_mode, bool builtin_exit_frame); static bool AllowDynamicFunction(Isolate* isolate, Handle target, Handle target_global_proxy); diff --git a/src/builtins/ia32/builtins-ia32.cc b/src/builtins/ia32/builtins-ia32.cc index 589b5741b7..0d1d8d2d7c 100644 --- a/src/builtins/ia32/builtins-ia32.cc +++ b/src/builtins/ia32/builtins-ia32.cc @@ -3004,8 +3004,7 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, - SaveFPRegsMode save_doubles, ArgvMode argv_mode, - bool builtin_exit_frame) { + ArgvMode argv_mode, bool builtin_exit_frame) { // eax: number of arguments including receiver // edx: pointer to C function // ebp: frame pointer (restored after C call) @@ -3034,7 +3033,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // Enter the exit frame that transitions from JavaScript to C++. if (argv_mode == ArgvMode::kRegister) { - DCHECK(save_doubles == SaveFPRegsMode::kIgnore); DCHECK(!builtin_exit_frame); __ EnterApiExitFrame(arg_stack_space, edi); @@ -3042,9 +3040,9 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ mov(esi, ecx); __ mov(edi, eax); } else { - __ EnterExitFrame( - arg_stack_space, save_doubles == SaveFPRegsMode::kSave, - builtin_exit_frame ? 
StackFrame::BUILTIN_EXIT : StackFrame::EXIT); + __ EnterExitFrame(arg_stack_space, builtin_exit_frame + ? StackFrame::BUILTIN_EXIT + : StackFrame::EXIT); } // edx: pointer to C function @@ -3090,8 +3088,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, } // Exit the JavaScript to C++ exit frame. - __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, - argv_mode == ArgvMode::kStack); + __ LeaveExitFrame(argv_mode == ArgvMode::kStack); __ ret(0); // Handling of exception. diff --git a/src/builtins/x64/builtins-x64.cc b/src/builtins/x64/builtins-x64.cc index 46b8584dcd..bb6e373aac 100644 --- a/src/builtins/x64/builtins-x64.cc +++ b/src/builtins/x64/builtins-x64.cc @@ -4507,8 +4507,7 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, - SaveFPRegsMode save_doubles, ArgvMode argv_mode, - bool builtin_exit_frame) { + ArgvMode argv_mode, bool builtin_exit_frame) { // rax: number of arguments including receiver // rbx: pointer to C function (C callee-saved) // rbp: frame pointer of calling JS frame (restored after C call) @@ -4547,15 +4546,14 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, kArgExtraStackSpace + (result_size <= kMaxRegisterResultSize ? 0 : result_size); if (argv_mode == ArgvMode::kRegister) { - DCHECK(save_doubles == SaveFPRegsMode::kIgnore); DCHECK(!builtin_exit_frame); __ EnterApiExitFrame(arg_stack_space); // Move argc into r12 (argv is already in r15). __ movq(r12, rax); } else { - __ EnterExitFrame( - arg_stack_space, save_doubles == SaveFPRegsMode::kSave, - builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); + __ EnterExitFrame(arg_stack_space, builtin_exit_frame + ? StackFrame::BUILTIN_EXIT + : StackFrame::EXIT); } // rbx: pointer to builtin function (C callee-saved). 
@@ -4618,8 +4616,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, } // Exit the JavaScript to C++ exit frame. - __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, - argv_mode == ArgvMode::kStack); + __ LeaveExitFrame(argv_mode == ArgvMode::kStack); __ ret(0); // Handling of exception. diff --git a/src/codegen/arm/macro-assembler-arm.cc b/src/codegen/arm/macro-assembler-arm.cc index 488d87a260..4456110dbb 100644 --- a/src/codegen/arm/macro-assembler-arm.cc +++ b/src/codegen/arm/macro-assembler-arm.cc @@ -1467,7 +1467,7 @@ void TurboAssembler::AllocateStackSpace(int bytes) { } #endif -void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, +void MacroAssembler::EnterExitFrame(int stack_space, StackFrame::Type frame_type) { ASM_CODE_COMMENT(this); DCHECK(frame_type == StackFrame::EXIT || @@ -1496,15 +1496,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); str(cp, MemOperand(scratch)); - // Optionally save all double registers. - if (save_doubles) { - SaveFPRegs(sp, scratch); - // Note that d0 will be accessible at - // fp - ExitFrameConstants::kFrameSize - - // DwVfpRegister::kNumRegisters * kDoubleSize, - // since the sp slot and code slot were pushed after the fp. - } - // Reserve place for the return address and stack space and align the frame // preparing for calling the runtime function. 
const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); @@ -1536,21 +1527,13 @@ int TurboAssembler::ActivationFrameAlignment() { #endif // V8_HOST_ARCH_ARM } -void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, +void MacroAssembler::LeaveExitFrame(Register argument_count, bool argument_count_is_length) { ASM_CODE_COMMENT(this); ConstantPoolUnavailableScope constant_pool_unavailable(this); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); - // Optionally restore all double registers. - if (save_doubles) { - // Calculate the stack location of the saved doubles and restore them. - const int offset = ExitFrameConstants::kFixedFrameSizeFromFp; - sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize)); - RestoreFPRegs(r3, scratch); - } - // Clear top frame. mov(r3, Operand::Zero()); Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, @@ -2094,8 +2077,8 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot( TailCallOptimizedCodeSlot(this, optimized_code_entry, r6); } -void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles) { +void MacroAssembler::CallRuntime(const Runtime::Function* f, + int num_arguments) { ASM_CODE_COMMENT(this); // All parameters are on the stack. r0 has the return value after call. @@ -2110,8 +2093,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, // smarter. 
mov(r0, Operand(num_arguments)); Move(r1, ExternalReference::Create(f)); - Handle code = - CodeFactory::CEntry(isolate(), f->result_size, save_doubles); + Handle code = CodeFactory::CEntry(isolate(), f->result_size); Call(code, RelocInfo::CODE_TARGET); } @@ -2136,8 +2118,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin, DCHECK_EQ(builtin.address() & 1, 1); #endif Move(r1, builtin); - Handle code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore, - ArgvMode::kStack, builtin_exit_frame); + Handle code = + CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame); Jump(code, RelocInfo::CODE_TARGET); } diff --git a/src/codegen/arm/macro-assembler-arm.h b/src/codegen/arm/macro-assembler-arm.h index 51f7907581..cb58091577 100644 --- a/src/codegen/arm/macro-assembler-arm.h +++ b/src/codegen/arm/macro-assembler-arm.h @@ -661,14 +661,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // Enter exit frame. // stack_space - extra stack space, used for alignment before call to C. - void EnterExitFrame(bool save_doubles, int stack_space = 0, - StackFrame::Type frame_type = StackFrame::EXIT); + void EnterExitFrame(int stack_space, StackFrame::Type frame_type); // Leave the current exit frame. Expects the return value in r0. // Expect the number of values, pushed prior to the exit frame, to // remove in a register (or no_reg, if there is nothing to remove). - void LeaveExitFrame(bool save_doubles, Register argument_count, - bool argument_count_is_length = false); + void LeaveExitFrame(Register argument_count, bool argument_count_is_length); // Load the global proxy from the current context. void LoadGlobalProxy(Register dst); @@ -786,20 +784,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // Runtime calls // Call a runtime routine. 
- void CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore); + void CallRuntime(const Runtime::Function* f, int num_arguments); // Convenience function: Same as above, but takes the fid instead. - void CallRuntime(Runtime::FunctionId fid, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { + void CallRuntime(Runtime::FunctionId fid) { const Runtime::Function* function = Runtime::FunctionForId(fid); - CallRuntime(function, function->nargs, save_doubles); + CallRuntime(function, function->nargs); } // Convenience function: Same as above, but takes the fid instead. - void CallRuntime(Runtime::FunctionId fid, int num_arguments, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { - CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles); + void CallRuntime(Runtime::FunctionId fid, int num_arguments) { + CallRuntime(Runtime::FunctionForId(fid), num_arguments); } // Convenience function: tail call a runtime routine (jump). diff --git a/src/codegen/arm64/macro-assembler-arm64.cc b/src/codegen/arm64/macro-assembler-arm64.cc index 0c03d34ba4..4c5894ef5e 100644 --- a/src/codegen/arm64/macro-assembler-arm64.cc +++ b/src/codegen/arm64/macro-assembler-arm64.cc @@ -1815,8 +1815,8 @@ void TurboAssembler::Swap(VRegister lhs, VRegister rhs) { Mov(lhs, temp); } -void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles) { +void MacroAssembler::CallRuntime(const Runtime::Function* f, + int num_arguments) { ASM_CODE_COMMENT(this); // All arguments must be on the stack before this function is called. // x0 holds the return value after the call. 
@@ -1829,8 +1829,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, Mov(x0, num_arguments); Mov(x1, ExternalReference::Create(f)); - Handle code = - CodeFactory::CEntry(isolate(), f->result_size, save_doubles); + Handle code = CodeFactory::CEntry(isolate(), f->result_size); Call(code, RelocInfo::CODE_TARGET); } @@ -1839,8 +1838,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin, ASM_CODE_COMMENT(this); Mov(x1, builtin); Handle code = - CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore, - ArgvMode::kStack, builtin_exit_frame); + CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame); Jump(code, RelocInfo::CODE_TARGET); } @@ -2904,30 +2902,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) { Pop(fp, lr); } -void MacroAssembler::ExitFramePreserveFPRegs() { - ASM_CODE_COMMENT(this); - DCHECK_EQ(kCallerSavedV.Count() % 2, 0); - PushCPURegList(kCallerSavedV); -} - -void MacroAssembler::ExitFrameRestoreFPRegs() { - // Read the registers from the stack without popping them. The stack pointer - // will be reset as part of the unwinding process. 
- ASM_CODE_COMMENT(this); - CPURegList saved_fp_regs = kCallerSavedV; - DCHECK_EQ(saved_fp_regs.Count() % 2, 0); - - int offset = ExitFrameConstants::kLastExitFrameField; - while (!saved_fp_regs.IsEmpty()) { - const CPURegister& dst0 = saved_fp_regs.PopHighestIndex(); - const CPURegister& dst1 = saved_fp_regs.PopHighestIndex(); - offset -= 2 * kDRegSize; - Ldp(dst1, dst0, MemOperand(fp, offset)); - } -} - -void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch, - int extra_space, +void MacroAssembler::EnterExitFrame(const Register& scratch, int extra_space, StackFrame::Type frame_type) { ASM_CODE_COMMENT(this); DCHECK(frame_type == StackFrame::EXIT || @@ -2960,9 +2935,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch, static_assert((-2 * kSystemPointerSize) == ExitFrameConstants::kLastExitFrameField); - if (save_doubles) { - ExitFramePreserveFPRegs(); - } // Round the number of space we need to claim to a multiple of two. int slots_to_claim = RoundUp(extra_space + 1, 2); @@ -2975,7 +2947,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch, // fp -> fp[0]: CallerFP (old fp) // fp[-8]: STUB marker // fp[-16]: Space reserved for SPOffset. - // fp[-16 - fp_size]: Saved doubles (if save_doubles is true). // sp[8]: Extra space reserved for caller (if extra_space != 0). // sp -> sp[0]: Space reserved for the return address. @@ -2988,13 +2959,9 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch, } // Leave the current exit frame. -void MacroAssembler::LeaveExitFrame(bool restore_doubles, - const Register& scratch, +void MacroAssembler::LeaveExitFrame(const Register& scratch, const Register& scratch2) { ASM_CODE_COMMENT(this); - if (restore_doubles) { - ExitFrameRestoreFPRegs(); - } // Restore the context pointer from the top frame. 
Mov(scratch, diff --git a/src/codegen/arm64/macro-assembler-arm64.h b/src/codegen/arm64/macro-assembler-arm64.h index beac877810..bc848778eb 100644 --- a/src/codegen/arm64/macro-assembler-arm64.h +++ b/src/codegen/arm64/macro-assembler-arm64.h @@ -1908,47 +1908,47 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { AbortReason reason = AbortReason::kOperandIsASmi) NOOP_UNLESS_DEBUG_CODE - // Abort execution if argument is not a CodeT, enabled via --debug-code. - void AssertCodeT(Register object) NOOP_UNLESS_DEBUG_CODE + // Abort execution if argument is not a CodeT, enabled via --debug-code. + void AssertCodeT(Register object) NOOP_UNLESS_DEBUG_CODE - // Abort execution if argument is not a Constructor, enabled via --debug-code. - void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE + // Abort execution if argument is not a Constructor, enabled via + // --debug-code. + void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE - // Abort execution if argument is not a JSFunction, enabled via --debug-code. - void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE + // Abort execution if argument is not a JSFunction, enabled via + // --debug-code. + void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE - // Abort execution if argument is not a callable JSFunction, enabled via - // --debug-code. - void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE + // Abort execution if argument is not a callable JSFunction, enabled via + // --debug-code. + void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE - // Abort execution if argument is not a JSGeneratorObject (or subclass), - // enabled via --debug-code. - void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE + // Abort execution if argument is not a JSGeneratorObject (or subclass), + // enabled via --debug-code. 
+ void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE - // Abort execution if argument is not a JSBoundFunction, - // enabled via --debug-code. - void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE + // Abort execution if argument is not a JSBoundFunction, + // enabled via --debug-code. + void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE - // Abort execution if argument is not undefined or an AllocationSite, enabled - // via --debug-code. - void AssertUndefinedOrAllocationSite(Register object) NOOP_UNLESS_DEBUG_CODE + // Abort execution if argument is not undefined or an AllocationSite, + // enabled via --debug-code. + void AssertUndefinedOrAllocationSite(Register object) + NOOP_UNLESS_DEBUG_CODE - // ---- Calling / Jumping helpers ---- + // ---- Calling / Jumping helpers ---- - void CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore); + void CallRuntime(const Runtime::Function* f, int num_arguments); // Convenience function: Same as above, but takes the fid instead. - void CallRuntime(Runtime::FunctionId fid, int num_arguments, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { - CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles); + void CallRuntime(Runtime::FunctionId fid, int num_arguments) { + CallRuntime(Runtime::FunctionForId(fid), num_arguments); } // Convenience function: Same as above, but takes the fid instead. 
- void CallRuntime(Runtime::FunctionId fid, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { + void CallRuntime(Runtime::FunctionId fid) { const Runtime::Function* function = Runtime::FunctionForId(fid); - CallRuntime(function, function->nargs, save_doubles); + CallRuntime(function, function->nargs); } void TailCallRuntime(Runtime::FunctionId fid); @@ -2051,9 +2051,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // --------------------------------------------------------------------------- // Frames. - void ExitFramePreserveFPRegs(); - void ExitFrameRestoreFPRegs(); - // Enter exit frame. Exit frames are used when calling C code from generated // (JavaScript) code. // @@ -2076,19 +2073,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // // This function also stores the new frame information in the top frame, so // that the new frame becomes the current frame. - void EnterExitFrame(bool save_doubles, const Register& scratch, - int extra_space = 0, - StackFrame::Type frame_type = StackFrame::EXIT); + void EnterExitFrame(const Register& scratch, int extra_space, + StackFrame::Type frame_type); // Leave the current exit frame, after a C function has returned to generated // (JavaScript) code. // // This effectively unwinds the operation of EnterExitFrame: - // * Preserved doubles are restored (if restore_doubles is true). // * The frame information is removed from the top frame. // * The exit frame is dropped. - void LeaveExitFrame(bool save_doubles, const Register& scratch, - const Register& scratch2); + void LeaveExitFrame(const Register& scratch, const Register& scratch2); // Load the global proxy from the current context. 
void LoadGlobalProxy(Register dst); diff --git a/src/codegen/code-factory.cc b/src/codegen/code-factory.cc index c611445512..01be4f3150 100644 --- a/src/codegen/code-factory.cc +++ b/src/codegen/code-factory.cc @@ -18,56 +18,31 @@ Handle CodeFactory::RuntimeCEntry(Isolate* isolate, int result_size) { return CodeFactory::CEntry(isolate, result_size); } -#define CENTRY_CODE(RS, SD, AM, BE) \ - BUILTIN_CODE(isolate, CEntry_##RS##_##SD##_##AM##_##BE) - // static Handle CodeFactory::CEntry(Isolate* isolate, int result_size, - SaveFPRegsMode save_doubles, ArgvMode argv_mode, bool builtin_exit_frame) { // Aliases for readability below. const int rs = result_size; - const SaveFPRegsMode sd = save_doubles; const ArgvMode am = argv_mode; const bool be = builtin_exit_frame; - if (rs == 1 && sd == SaveFPRegsMode::kIgnore && am == ArgvMode::kStack && - !be) { - return CENTRY_CODE(Return1, DontSaveFPRegs, ArgvOnStack, NoBuiltinExit); - } else if (rs == 1 && sd == SaveFPRegsMode::kIgnore && - am == ArgvMode::kStack && be) { - return CENTRY_CODE(Return1, DontSaveFPRegs, ArgvOnStack, BuiltinExit); - } else if (rs == 1 && sd == SaveFPRegsMode::kIgnore && - am == ArgvMode::kRegister && !be) { - return CENTRY_CODE(Return1, DontSaveFPRegs, ArgvInRegister, NoBuiltinExit); - } else if (rs == 1 && sd == SaveFPRegsMode::kSave && am == ArgvMode::kStack && - !be) { - return CENTRY_CODE(Return1, SaveFPRegs, ArgvOnStack, NoBuiltinExit); - } else if (rs == 1 && sd == SaveFPRegsMode::kSave && am == ArgvMode::kStack && - be) { - return CENTRY_CODE(Return1, SaveFPRegs, ArgvOnStack, BuiltinExit); - } else if (rs == 2 && sd == SaveFPRegsMode::kIgnore && - am == ArgvMode::kStack && !be) { - return CENTRY_CODE(Return2, DontSaveFPRegs, ArgvOnStack, NoBuiltinExit); - } else if (rs == 2 && sd == SaveFPRegsMode::kIgnore && - am == ArgvMode::kStack && be) { - return CENTRY_CODE(Return2, DontSaveFPRegs, ArgvOnStack, BuiltinExit); - } else if (rs == 2 && sd == SaveFPRegsMode::kIgnore && - am == 
ArgvMode::kRegister && !be) { - return CENTRY_CODE(Return2, DontSaveFPRegs, ArgvInRegister, NoBuiltinExit); - } else if (rs == 2 && sd == SaveFPRegsMode::kSave && am == ArgvMode::kStack && - !be) { - return CENTRY_CODE(Return2, SaveFPRegs, ArgvOnStack, NoBuiltinExit); - } else if (rs == 2 && sd == SaveFPRegsMode::kSave && am == ArgvMode::kStack && - be) { - return CENTRY_CODE(Return2, SaveFPRegs, ArgvOnStack, BuiltinExit); + if (rs == 1 && am == ArgvMode::kStack && !be) { + return BUILTIN_CODE(isolate, CEntry_Return1_ArgvOnStack_NoBuiltinExit); + } else if (rs == 1 && am == ArgvMode::kStack && be) { + return BUILTIN_CODE(isolate, CEntry_Return1_ArgvOnStack_BuiltinExit); + } else if (rs == 1 && am == ArgvMode::kRegister && !be) { + return BUILTIN_CODE(isolate, CEntry_Return1_ArgvInRegister_NoBuiltinExit); + } else if (rs == 2 && am == ArgvMode::kStack && !be) { + return BUILTIN_CODE(isolate, CEntry_Return2_ArgvOnStack_NoBuiltinExit); + } else if (rs == 2 && am == ArgvMode::kStack && be) { + return BUILTIN_CODE(isolate, CEntry_Return2_ArgvOnStack_BuiltinExit); + } else if (rs == 2 && am == ArgvMode::kRegister && !be) { + return BUILTIN_CODE(isolate, CEntry_Return2_ArgvInRegister_NoBuiltinExit); } UNREACHABLE(); } -#undef CENTRY_CODE - // static Callable CodeFactory::ApiGetter(Isolate* isolate) { return Builtins::CallableFor(isolate, Builtin::kCallApiGetter); @@ -279,10 +254,8 @@ Callable CodeFactory::InterpreterPushArgsThenConstruct( // static Callable CodeFactory::InterpreterCEntry(Isolate* isolate, int result_size) { - // Note: If we ever use fpregs in the interpreter then we will need to - // save fpregs too. 
- Handle code = CodeFactory::CEntry( - isolate, result_size, SaveFPRegsMode::kIgnore, ArgvMode::kRegister); + Handle code = + CodeFactory::CEntry(isolate, result_size, ArgvMode::kRegister); if (result_size == 1) { return Callable(code, InterpreterCEntry1Descriptor{}); } else { diff --git a/src/codegen/code-factory.h b/src/codegen/code-factory.h index 937ad2e5b4..73629f5f04 100644 --- a/src/codegen/code-factory.h +++ b/src/codegen/code-factory.h @@ -28,10 +28,9 @@ class V8_EXPORT_PRIVATE CodeFactory final { // is exported here. static Handle RuntimeCEntry(Isolate* isolate, int result_size = 1); - static Handle CEntry( - Isolate* isolate, int result_size = 1, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore, - ArgvMode argv_mode = ArgvMode::kStack, bool builtin_exit_frame = false); + static Handle CEntry(Isolate* isolate, int result_size = 1, + ArgvMode argv_mode = ArgvMode::kStack, + bool builtin_exit_frame = false); // Initial states for ICs. static Callable LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode); diff --git a/src/codegen/ia32/macro-assembler-ia32.cc b/src/codegen/ia32/macro-assembler-ia32.cc index ceaf19587d..0f65c7bfc4 100644 --- a/src/codegen/ia32/macro-assembler-ia32.cc +++ b/src/codegen/ia32/macro-assembler-ia32.cc @@ -1163,21 +1163,10 @@ void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type, mov(ExternalReferenceAsOperand(c_function_address, scratch), edx); } -void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) { +void MacroAssembler::EnterExitFrameEpilogue(int argc) { ASM_CODE_COMMENT(this); - // Optionally save all XMM registers. 
- if (save_doubles) { - int space = - XMMRegister::kNumRegisters * kDoubleSize + argc * kSystemPointerSize; - AllocateStackSpace(space); - const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp; - for (int i = 0; i < XMMRegister::kNumRegisters; i++) { - XMMRegister reg = XMMRegister::from_code(i); - movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg); - } - } else { - AllocateStackSpace(argc * kSystemPointerSize); - } + + AllocateStackSpace(argc * kSystemPointerSize); // Get the required frame alignment for the OS. const int kFrameAlignment = base::OS::ActivationFrameAlignment(); @@ -1190,8 +1179,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) { mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp); } -void MacroAssembler::EnterExitFrame(int argc, bool save_doubles, - StackFrame::Type frame_type) { +void MacroAssembler::EnterExitFrame(int argc, StackFrame::Type frame_type) { ASM_CODE_COMMENT(this); EnterExitFramePrologue(frame_type, edi); @@ -1201,25 +1189,16 @@ void MacroAssembler::EnterExitFrame(int argc, bool save_doubles, lea(esi, Operand(ebp, eax, times_system_pointer_size, offset)); // Reserve space for argc, argv and isolate. - EnterExitFrameEpilogue(argc, save_doubles); + EnterExitFrameEpilogue(argc); } void MacroAssembler::EnterApiExitFrame(int argc, Register scratch) { EnterExitFramePrologue(StackFrame::EXIT, scratch); - EnterExitFrameEpilogue(argc, false); + EnterExitFrameEpilogue(argc); } -void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) { +void MacroAssembler::LeaveExitFrame(bool pop_arguments) { ASM_CODE_COMMENT(this); - // Optionally restore all XMM registers. 
- if (save_doubles) { - const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp; - for (int i = 0; i < XMMRegister::kNumRegisters; i++) { - XMMRegister reg = XMMRegister::from_code(i); - movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize))); - } - } - if (pop_arguments) { // Get the return address from the stack and restore the frame pointer. mov(ecx, Operand(ebp, 1 * kSystemPointerSize)); @@ -1291,8 +1270,8 @@ void MacroAssembler::PopStackHandler(Register scratch) { add(esp, Immediate(StackHandlerConstants::kSize - kSystemPointerSize)); } -void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles) { +void MacroAssembler::CallRuntime(const Runtime::Function* f, + int num_arguments) { ASM_CODE_COMMENT(this); // If the expected number of arguments of the runtime function is // constant, we check that the actual number of arguments match the @@ -1305,8 +1284,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, // smarter. Move(kRuntimeCallArgCountRegister, Immediate(num_arguments)); Move(kRuntimeCallFunctionRegister, Immediate(ExternalReference::Create(f))); - Handle code = - CodeFactory::CEntry(isolate(), f->result_size, save_doubles); + Handle code = CodeFactory::CEntry(isolate(), f->result_size); Call(code, RelocInfo::CODE_TARGET); } @@ -1338,8 +1316,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext, ASM_CODE_COMMENT(this); // Set the entry point and jump to the C entry runtime stub. 
Move(kRuntimeCallFunctionRegister, Immediate(ext)); - Handle code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore, - ArgvMode::kStack, builtin_exit_frame); + Handle code = + CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame); Jump(code, RelocInfo::CODE_TARGET); } diff --git a/src/codegen/ia32/macro-assembler-ia32.h b/src/codegen/ia32/macro-assembler-ia32.h index a55beb1a4e..2332c47abf 100644 --- a/src/codegen/ia32/macro-assembler-ia32.h +++ b/src/codegen/ia32/macro-assembler-ia32.h @@ -466,14 +466,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // arguments in register eax and sets up the number of arguments in // register edi and the pointer to the first argument in register // esi. - void EnterExitFrame(int argc, bool save_doubles, StackFrame::Type frame_type); + void EnterExitFrame(int argc, StackFrame::Type frame_type); void EnterApiExitFrame(int argc, Register scratch); // Leave the current exit frame. Expects the return value in // register eax:edx (untouched) and the pointer to the first // argument in register esi (if pop_arguments == true). - void LeaveExitFrame(bool save_doubles, bool pop_arguments = true); + void LeaveExitFrame(bool pop_arguments); // Leave the current exit frame. Expects the return value in // register eax (untouched). @@ -610,20 +610,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // Runtime calls // Call a runtime routine. - void CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore); + void CallRuntime(const Runtime::Function* f, int num_arguments); // Convenience function: Same as above, but takes the fid instead. 
- void CallRuntime(Runtime::FunctionId fid, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { + void CallRuntime(Runtime::FunctionId fid) { const Runtime::Function* function = Runtime::FunctionForId(fid); - CallRuntime(function, function->nargs, save_doubles); + CallRuntime(function, function->nargs); } // Convenience function: Same as above, but takes the fid instead. - void CallRuntime(Runtime::FunctionId fid, int num_arguments, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { - CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles); + void CallRuntime(Runtime::FunctionId fid, int num_arguments) { + CallRuntime(Runtime::FunctionForId(fid), num_arguments); } // Convenience function: tail call a runtime routine (jump). @@ -674,7 +671,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { InvokeType type); void EnterExitFramePrologue(StackFrame::Type frame_type, Register scratch); - void EnterExitFrameEpilogue(int argc, bool save_doubles); + void EnterExitFrameEpilogue(int argc); void LeaveExitFrameEpilogue(); diff --git a/src/codegen/x64/macro-assembler-x64.cc b/src/codegen/x64/macro-assembler-x64.cc index 0c4cf625e4..6c5ae64da4 100644 --- a/src/codegen/x64/macro-assembler-x64.cc +++ b/src/codegen/x64/macro-assembler-x64.cc @@ -734,8 +734,8 @@ void TurboAssembler::Abort(AbortReason reason) { int3(); } -void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles) { +void MacroAssembler::CallRuntime(const Runtime::Function* f, + int num_arguments) { ASM_CODE_COMMENT(this); // If the expected number of arguments of the runtime function is // constant, we check that the actual number of arguments match the @@ -748,8 +748,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, // smarter. 
Move(rax, num_arguments); LoadAddress(rbx, ExternalReference::Create(f)); - Handle code = - CodeFactory::CEntry(isolate(), f->result_size, save_doubles); + Handle code = CodeFactory::CEntry(isolate(), f->result_size); Call(code, RelocInfo::CODE_TARGET); } @@ -778,8 +777,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext, // Set the entry point and jump to the C entry runtime stub. LoadAddress(rbx, ext); Handle code = - CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore, - ArgvMode::kStack, builtin_exit_frame); + CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame); Jump(code, RelocInfo::CODE_TARGET); } @@ -3208,27 +3206,14 @@ static const int kRegisterPassedArguments = 4; static const int kRegisterPassedArguments = 6; #endif -void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space, - bool save_doubles) { +void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space) { ASM_CODE_COMMENT(this); + #ifdef V8_TARGET_OS_WIN arg_stack_space += kRegisterPassedArguments; #endif - // Optionally save all XMM registers. - if (save_doubles) { - int space = XMMRegister::kNumRegisters * kDoubleSize + - arg_stack_space * kSystemPointerSize; - AllocateStackSpace(space); - int offset = -ExitFrameConstants::kFixedFrameSizeFromFp; - const RegisterConfiguration* config = RegisterConfiguration::Default(); - for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { - DoubleRegister reg = - DoubleRegister::from_code(config->GetAllocatableDoubleCode(i)); - Movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg); - } - } else if (arg_stack_space > 0) { - AllocateStackSpace(arg_stack_space * kSystemPointerSize); - } + + AllocateStackSpace(arg_stack_space * kSystemPointerSize); // Get the required frame alignment for the OS. 
const int kFrameAlignment = base::OS::ActivationFrameAlignment(); @@ -3242,7 +3227,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space, movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp); } -void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles, +void MacroAssembler::EnterExitFrame(int arg_stack_space, StackFrame::Type frame_type) { ASM_CODE_COMMENT(this); Register saved_rax_reg = r12; @@ -3253,29 +3238,19 @@ void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles, int offset = StandardFrameConstants::kCallerSPOffset - kSystemPointerSize; leaq(r15, Operand(rbp, saved_rax_reg, times_system_pointer_size, offset)); - EnterExitFrameEpilogue(arg_stack_space, save_doubles); + EnterExitFrameEpilogue(arg_stack_space); } void MacroAssembler::EnterApiExitFrame(int arg_stack_space) { ASM_CODE_COMMENT(this); EnterExitFramePrologue(no_reg, StackFrame::EXIT); - EnterExitFrameEpilogue(arg_stack_space, false); + EnterExitFrameEpilogue(arg_stack_space); } -void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) { +void MacroAssembler::LeaveExitFrame(bool pop_arguments) { ASM_CODE_COMMENT(this); // Registers: // r15 : argv - if (save_doubles) { - int offset = -ExitFrameConstants::kFixedFrameSizeFromFp; - const RegisterConfiguration* config = RegisterConfiguration::Default(); - for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { - DoubleRegister reg = - DoubleRegister::from_code(config->GetAllocatableDoubleCode(i)); - Movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize))); - } - } - if (pop_arguments) { // Get the return address from the stack and restore the frame pointer. 
movq(rcx, Operand(rbp, kFPOnStackSize)); diff --git a/src/codegen/x64/macro-assembler-x64.h b/src/codegen/x64/macro-assembler-x64.h index 0b902cc6dd..0f1818c17a 100644 --- a/src/codegen/x64/macro-assembler-x64.h +++ b/src/codegen/x64/macro-assembler-x64.h @@ -737,8 +737,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // // Allocates arg_stack_space * kSystemPointerSize memory (not GCed) on the // stack accessible via StackSpaceOperand. - void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false, - StackFrame::Type frame_type = StackFrame::EXIT); + void EnterExitFrame(int arg_stack_space, StackFrame::Type frame_type); // Enter specific kind of exit frame. Allocates // (arg_stack_space * kSystemPointerSize) memory (not GCed) on the stack @@ -748,7 +747,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // Leave the current exit frame. Expects/provides the return value in // register rax:rdx (untouched) and the pointer to the first // argument in register rsi (if pop_arguments == true). - void LeaveExitFrame(bool save_doubles = false, bool pop_arguments = true); + void LeaveExitFrame(bool pop_arguments); // Leave the current exit frame. Expects/provides the return value in // register rax (untouched). @@ -900,20 +899,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // Runtime calls // Call a runtime routine. - void CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore); + void CallRuntime(const Runtime::Function* f, int num_arguments); // Convenience function: Same as above, but takes the fid instead. 
- void CallRuntime(Runtime::FunctionId fid, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { + void CallRuntime(Runtime::FunctionId fid) { const Runtime::Function* function = Runtime::FunctionForId(fid); - CallRuntime(function, function->nargs, save_doubles); + CallRuntime(function, function->nargs); } // Convenience function: Same as above, but takes the fid instead. - void CallRuntime(Runtime::FunctionId fid, int num_arguments, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { - CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles); + void CallRuntime(Runtime::FunctionId fid, int num_arguments) { + CallRuntime(Runtime::FunctionForId(fid), num_arguments); } // Convenience function: tail call a runtime routine (jump) @@ -957,7 +953,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // Allocates arg_stack_space * kSystemPointerSize memory (not GCed) on the // stack accessible via StackSpaceOperand. - void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles); + void EnterExitFrameEpilogue(int arg_stack_space); void LeaveExitFrameEpilogue(); diff --git a/src/common/globals.h b/src/common/globals.h index 1be647e09f..517c6e259f 100644 --- a/src/common/globals.h +++ b/src/common/globals.h @@ -675,8 +675,11 @@ enum class StoreOrigin { kMaybeKeyed, kNamed }; enum class TypeofMode { kInside, kNotInside }; -// Enums used by CEntry. +// Whether floating point registers should be saved (and restored). enum class SaveFPRegsMode { kIgnore, kSave }; + +// Whether arguments are passed on a known stack location or through a +// register. enum class ArgvMode { kStack, kRegister }; // This constant is used as an undefined value when passing source positions. 
diff --git a/src/compiler/js-call-reducer.cc b/src/compiler/js-call-reducer.cc index 0331bb1782..3a757c6fdb 100644 --- a/src/compiler/js-call-reducer.cc +++ b/src/compiler/js-call-reducer.cc @@ -5972,8 +5972,8 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) { graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver, Builtins::name(builtin), node->op()->properties(), CallDescriptor::kNeedsFrameState); - Node* stub_code = jsgraph()->CEntryStubConstant( - 1, SaveFPRegsMode::kIgnore, ArgvMode::kStack, true); + Node* stub_code = + jsgraph()->CEntryStubConstant(1, ArgvMode::kStack, true); Address builtin_entry = Builtins::CppEntryOf(builtin); Node* entry = jsgraph()->ExternalConstant( ExternalReference::Create(builtin_entry)); diff --git a/src/compiler/js-graph.cc b/src/compiler/js-graph.cc index a63913348d..bc4fae40f5 100644 --- a/src/compiler/js-graph.cc +++ b/src/compiler/js-graph.cc @@ -16,10 +16,9 @@ namespace compiler { #define DEFINE_GETTER(name, expr) \ Node* JSGraph::name() { return GET_CACHED_FIELD(&name##_, expr); } -Node* JSGraph::CEntryStubConstant(int result_size, SaveFPRegsMode save_doubles, - ArgvMode argv_mode, bool builtin_exit_frame) { - if (save_doubles == SaveFPRegsMode::kIgnore && - argv_mode == ArgvMode::kStack) { +Node* JSGraph::CEntryStubConstant(int result_size, ArgvMode argv_mode, + bool builtin_exit_frame) { + if (argv_mode == ArgvMode::kStack) { DCHECK(result_size >= 1 && result_size <= 3); if (!builtin_exit_frame) { Node** ptr = nullptr; @@ -31,18 +30,18 @@ Node* JSGraph::CEntryStubConstant(int result_size, SaveFPRegsMode save_doubles, DCHECK_EQ(3, result_size); ptr = &CEntryStub3Constant_; } - return GET_CACHED_FIELD(ptr, HeapConstant(CodeFactory::CEntry( - isolate(), result_size, save_doubles, - argv_mode, builtin_exit_frame))); + return GET_CACHED_FIELD( + ptr, HeapConstant(CodeFactory::CEntry( + isolate(), result_size, argv_mode, builtin_exit_frame))); } Node** ptr = builtin_exit_frame ? 
&CEntryStub1WithBuiltinExitFrameConstant_ : &CEntryStub1Constant_; - return GET_CACHED_FIELD(ptr, HeapConstant(CodeFactory::CEntry( - isolate(), result_size, save_doubles, - argv_mode, builtin_exit_frame))); + return GET_CACHED_FIELD( + ptr, HeapConstant(CodeFactory::CEntry(isolate(), result_size, argv_mode, + builtin_exit_frame))); } - return HeapConstant(CodeFactory::CEntry(isolate(), result_size, save_doubles, - argv_mode, builtin_exit_frame)); + return HeapConstant(CodeFactory::CEntry(isolate(), result_size, argv_mode, + builtin_exit_frame)); } Node* JSGraph::Constant(const ObjectRef& ref) { diff --git a/src/compiler/js-graph.h b/src/compiler/js-graph.h index a06411f719..181070f024 100644 --- a/src/compiler/js-graph.h +++ b/src/compiler/js-graph.h @@ -37,9 +37,9 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph { JSGraph& operator=(const JSGraph&) = delete; // CEntryStubs are cached depending on the result size and other flags. - Node* CEntryStubConstant( - int result_size, SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore, - ArgvMode argv_mode = ArgvMode::kStack, bool builtin_exit_frame = false); + Node* CEntryStubConstant(int result_size, + ArgvMode argv_mode = ArgvMode::kStack, + bool builtin_exit_frame = false); // Used for padding frames. 
(alias: the hole) Node* PaddingConstant() { return TheHoleConstant(); } diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc index 28053c35ac..41cbd35d57 100644 --- a/src/compiler/js-typed-lowering.cc +++ b/src/compiler/js-typed-lowering.cc @@ -1604,8 +1604,8 @@ void ReduceBuiltin(JSGraph* jsgraph, Node* node, Builtin builtin, int arity, DCHECK(Builtins::IsCpp(builtin)); const bool has_builtin_exit_frame = true; - Node* stub = jsgraph->CEntryStubConstant( - 1, SaveFPRegsMode::kIgnore, ArgvMode::kStack, has_builtin_exit_frame); + Node* stub = + jsgraph->CEntryStubConstant(1, ArgvMode::kStack, has_builtin_exit_frame); node->ReplaceInput(0, stub); const int argc = arity + BuiltinArguments::kNumExtraArgsWithReceiver; diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc index ca164384f5..22b7379e6b 100644 --- a/src/compiler/wasm-compiler.cc +++ b/src/compiler/wasm-compiler.cc @@ -3284,8 +3284,7 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f, // Isolate independent. At the moment this is only done for CEntryStub(1). 
Node* isolate_root = BuildLoadIsolateRoot(); DCHECK_EQ(1, fun->result_size); - auto centry_id = - Builtin::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit; + auto centry_id = Builtin::kCEntry_Return1_ArgvOnStack_NoBuiltinExit; int builtin_slot_offset = IsolateData::BuiltinSlotOffset(centry_id); Node* centry_stub = gasm_->LoadFromObject(MachineType::Pointer(), isolate_root, builtin_slot_offset); diff --git a/src/debug/debug-evaluate.cc b/src/debug/debug-evaluate.cc index 8d1ecb80af..8f893d4087 100644 --- a/src/debug/debug-evaluate.cc +++ b/src/debug/debug-evaluate.cc @@ -1133,16 +1133,12 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtin caller, case Builtin::kCall_ReceiverIsNotNullOrUndefined: case Builtin::kCall_ReceiverIsNullOrUndefined: case Builtin::kCallWithArrayLike: - case Builtin::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit: - case Builtin::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_BuiltinExit: - case Builtin::kCEntry_Return1_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit: - case Builtin::kCEntry_Return1_SaveFPRegs_ArgvOnStack_NoBuiltinExit: - case Builtin::kCEntry_Return1_SaveFPRegs_ArgvOnStack_BuiltinExit: - case Builtin::kCEntry_Return2_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit: - case Builtin::kCEntry_Return2_DontSaveFPRegs_ArgvOnStack_BuiltinExit: - case Builtin::kCEntry_Return2_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit: - case Builtin::kCEntry_Return2_SaveFPRegs_ArgvOnStack_NoBuiltinExit: - case Builtin::kCEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit: + case Builtin::kCEntry_Return1_ArgvOnStack_NoBuiltinExit: + case Builtin::kCEntry_Return1_ArgvOnStack_BuiltinExit: + case Builtin::kCEntry_Return1_ArgvInRegister_NoBuiltinExit: + case Builtin::kCEntry_Return2_ArgvOnStack_NoBuiltinExit: + case Builtin::kCEntry_Return2_ArgvOnStack_BuiltinExit: + case Builtin::kCEntry_Return2_ArgvInRegister_NoBuiltinExit: case Builtin::kCloneFastJSArray: case Builtin::kConstruct: case Builtin::kConvertToLocaleString: From 
2f852102d9cdc891659866728d6b1420c6e32f42 Mon Sep 17 00:00:00 2001 From: Manos Koukoutos Date: Tue, 20 Dec 2022 11:37:35 +0100 Subject: [PATCH 004/654] [wasm-gc] Final types We add final types to wasm-gc. - We introduce a `kWasmSubtypeFinalCode` as an alternative to `kWasmSubtypeCode`. - Behind a flag, we interpret types behind this code as final, as well as types outside a subtype definition by default. - For final types, type checks for call_indirect and ref.test etc. are reduced to simple type identity checks. Bug: v8:7748 Change-Id: Iabf147b2a15f43abc4c7d1c582f460dbdc645d66 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4096478 Reviewed-by: Matthias Liedtke Reviewed-by: Clemens Backes Commit-Queue: Manos Koukoutos Cr-Commit-Position: refs/heads/main@{#84955} --- src/asmjs/asm-parser.cc | 2 +- src/compiler/wasm-compiler.cc | 7 +- src/compiler/wasm-gc-lowering.cc | 137 ++++++++++-------- src/flags/flag-definitions.h | 2 + src/wasm/baseline/liftoff-compiler.cc | 3 +- src/wasm/canonical-types.cc | 13 +- src/wasm/canonical-types.h | 2 + src/wasm/module-decoder-impl.h | 23 ++- src/wasm/wasm-constants.h | 1 + src/wasm/wasm-module-builder.cc | 30 ++-- src/wasm/wasm-module-builder.h | 10 +- src/wasm/wasm-module.h | 56 ++++--- src/wasm/wasm-subtyping.cc | 9 +- test/cctest/wasm/test-gc.cc | 13 +- test/cctest/wasm/wasm-run-utils.h | 3 +- test/fuzzer/wasm-compile.cc | 23 +-- .../debugger/wasm-gc-anyref-expected.txt | 6 +- test/mjsunit/wasm/call-ref.js | 9 +- test/mjsunit/wasm/gc-casts-subtypes.js | 6 +- test/mjsunit/wasm/wasm-module-builder.js | 27 ++-- .../wasm/function-body-decoder-unittest.cc | 7 +- .../unittests/wasm/module-decoder-unittest.cc | 30 +++- test/unittests/wasm/subtyping-unittest.cc | 92 +++++++----- .../wasm-disassembler-unittest-gc.wasm.inc | 4 +- test/wasm-api-tests/table.cc | 2 +- 25 files changed, 314 insertions(+), 203 deletions(-) diff --git a/src/asmjs/asm-parser.cc b/src/asmjs/asm-parser.cc index d1c59ea601..9346be040b 100644 --- 
a/src/asmjs/asm-parser.cc +++ b/src/asmjs/asm-parser.cc @@ -2240,7 +2240,7 @@ AsmType* AsmJsParser::ValidateCall() { function_type->AsFunctionType()->AddArgument(t); } FunctionSig* sig = ConvertSignature(return_type, param_types); - uint32_t signature_index = module_builder_->AddSignature(sig); + uint32_t signature_index = module_builder_->AddSignature(sig, true); // Emit actual function invocation depending on the kind. At this point we // also determined the complete function type and can perform checking against diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc index 22b7379e6b..8e8e45247a 100644 --- a/src/compiler/wasm-compiler.cc +++ b/src/compiler/wasm-compiler.cc @@ -2849,8 +2849,6 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index, LoadIndirectFunctionTable(table_index, &ift_size, &ift_sig_ids, &ift_targets, &ift_instances); - const wasm::FunctionSig* sig = env_->module->signature(sig_index); - Node* key = args[0]; // Bounds check against the table size. @@ -2869,7 +2867,8 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index, int32_scaled_key); Node* sig_match = gasm_->Word32Equal(loaded_sig, expected_sig_id); - if (v8_flags.experimental_wasm_gc) { + if (v8_flags.experimental_wasm_gc && + !env_->module->types[sig_index].is_final) { // Do a full subtyping check. // TODO(7748): Optimize for non-nullable tables. // TODO(7748): Optimize if type annotation matches table type. 
@@ -2938,6 +2937,8 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index, args[0] = target; + const wasm::FunctionSig* sig = env_->module->signature(sig_index); + switch (continuation) { case kCallContinues: return BuildWasmCall(sig, args, rets, position, target_instance); diff --git a/src/compiler/wasm-gc-lowering.cc b/src/compiler/wasm-gc-lowering.cc index 0ab9e1aa4b..67e7108262 100644 --- a/src/compiler/wasm-gc-lowering.cc +++ b/src/compiler/wasm-gc-lowering.cc @@ -121,41 +121,45 @@ Reduction WasmGCLowering::ReduceWasmTypeCheck(Node* node) { Node* map = gasm_.LoadMap(object); - // First, check if types happen to be equal. This has been shown to give large - // speedups. - gasm_.GotoIf(gasm_.TaggedEqual(map, rtt), &end_label, BranchHint::kTrue, - gasm_.Int32Constant(1)); + if (module_->types[config.to.ref_index()].is_final) { + gasm_.Goto(&end_label, gasm_.TaggedEqual(map, rtt)); + } else { + // First, check if types happen to be equal. This has been shown to give + // large speedups. + gasm_.GotoIf(gasm_.TaggedEqual(map, rtt), &end_label, BranchHint::kTrue, + gasm_.Int32Constant(1)); - // Check if map instance type identifies a wasm object. - if (is_cast_from_any) { - Node* is_wasm_obj = gasm_.IsDataRefMap(map); - gasm_.GotoIfNot(is_wasm_obj, &end_label, BranchHint::kTrue, - gasm_.Int32Constant(0)); + // Check if map instance type identifies a wasm object. + if (is_cast_from_any) { + Node* is_wasm_obj = gasm_.IsDataRefMap(map); + gasm_.GotoIfNot(is_wasm_obj, &end_label, BranchHint::kTrue, + gasm_.Int32Constant(0)); + } + + Node* type_info = gasm_.LoadWasmTypeInfo(map); + DCHECK_GE(rtt_depth, 0); + // If the depth of the rtt is known to be less that the minimum supertype + // array length, we can access the supertype without bounds-checking the + // supertype array. 
+ if (static_cast(rtt_depth) >= wasm::kMinimumSupertypeArraySize) { + Node* supertypes_length = + gasm_.BuildChangeSmiToIntPtr(gasm_.LoadImmutableFromObject( + MachineType::TaggedSigned(), type_info, + wasm::ObjectAccess::ToTagged( + WasmTypeInfo::kSupertypesLengthOffset))); + gasm_.GotoIfNot(gasm_.UintLessThan(gasm_.IntPtrConstant(rtt_depth), + supertypes_length), + &end_label, BranchHint::kTrue, gasm_.Int32Constant(0)); + } + + Node* maybe_match = gasm_.LoadImmutableFromObject( + MachineType::TaggedPointer(), type_info, + wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset + + kTaggedSize * rtt_depth)); + + gasm_.Goto(&end_label, gasm_.TaggedEqual(maybe_match, rtt)); } - Node* type_info = gasm_.LoadWasmTypeInfo(map); - DCHECK_GE(rtt_depth, 0); - // If the depth of the rtt is known to be less that the minimum supertype - // array length, we can access the supertype without bounds-checking the - // supertype array. - if (static_cast(rtt_depth) >= wasm::kMinimumSupertypeArraySize) { - Node* supertypes_length = - gasm_.BuildChangeSmiToIntPtr(gasm_.LoadImmutableFromObject( - MachineType::TaggedSigned(), type_info, - wasm::ObjectAccess::ToTagged( - WasmTypeInfo::kSupertypesLengthOffset))); - gasm_.GotoIfNot( - gasm_.UintLessThan(gasm_.IntPtrConstant(rtt_depth), supertypes_length), - &end_label, BranchHint::kTrue, gasm_.Int32Constant(0)); - } - - Node* maybe_match = gasm_.LoadImmutableFromObject( - MachineType::TaggedPointer(), type_info, - wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset + - kTaggedSize * rtt_depth)); - - gasm_.Goto(&end_label, gasm_.TaggedEqual(maybe_match, rtt)); - gasm_.Bind(&end_label); ReplaceWithValue(node, end_label.PhiAt(0), gasm_.effect(), gasm_.control()); @@ -199,41 +203,46 @@ Reduction WasmGCLowering::ReduceWasmTypeCast(Node* node) { Node* map = gasm_.LoadMap(object); - // First, check if types happen to be equal. This has been shown to give large - // speedups. 
- gasm_.GotoIf(gasm_.TaggedEqual(map, rtt), &end_label, BranchHint::kTrue); + if (module_->types[config.to.ref_index()].is_final) { + gasm_.TrapUnless(gasm_.TaggedEqual(map, rtt), TrapId::kTrapIllegalCast); + gasm_.Goto(&end_label); + } else { + // First, check if types happen to be equal. This has been shown to give + // large speedups. + gasm_.GotoIf(gasm_.TaggedEqual(map, rtt), &end_label, BranchHint::kTrue); - // Check if map instance type identifies a wasm object. - if (is_cast_from_any) { - Node* is_wasm_obj = gasm_.IsDataRefMap(map); - gasm_.TrapUnless(is_wasm_obj, TrapId::kTrapIllegalCast); + // Check if map instance type identifies a wasm object. + if (is_cast_from_any) { + Node* is_wasm_obj = gasm_.IsDataRefMap(map); + gasm_.TrapUnless(is_wasm_obj, TrapId::kTrapIllegalCast); + } + + Node* type_info = gasm_.LoadWasmTypeInfo(map); + DCHECK_GE(rtt_depth, 0); + // If the depth of the rtt is known to be less that the minimum supertype + // array length, we can access the supertype without bounds-checking the + // supertype array. 
+ if (static_cast(rtt_depth) >= wasm::kMinimumSupertypeArraySize) { + Node* supertypes_length = + gasm_.BuildChangeSmiToIntPtr(gasm_.LoadImmutableFromObject( + MachineType::TaggedSigned(), type_info, + wasm::ObjectAccess::ToTagged( + WasmTypeInfo::kSupertypesLengthOffset))); + gasm_.TrapUnless(gasm_.UintLessThan(gasm_.IntPtrConstant(rtt_depth), + supertypes_length), + TrapId::kTrapIllegalCast); + } + + Node* maybe_match = gasm_.LoadImmutableFromObject( + MachineType::TaggedPointer(), type_info, + wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset + + kTaggedSize * rtt_depth)); + + gasm_.TrapUnless(gasm_.TaggedEqual(maybe_match, rtt), + TrapId::kTrapIllegalCast); + gasm_.Goto(&end_label); } - Node* type_info = gasm_.LoadWasmTypeInfo(map); - DCHECK_GE(rtt_depth, 0); - // If the depth of the rtt is known to be less that the minimum supertype - // array length, we can access the supertype without bounds-checking the - // supertype array. - if (static_cast(rtt_depth) >= wasm::kMinimumSupertypeArraySize) { - Node* supertypes_length = - gasm_.BuildChangeSmiToIntPtr(gasm_.LoadImmutableFromObject( - MachineType::TaggedSigned(), type_info, - wasm::ObjectAccess::ToTagged( - WasmTypeInfo::kSupertypesLengthOffset))); - gasm_.TrapUnless( - gasm_.UintLessThan(gasm_.IntPtrConstant(rtt_depth), supertypes_length), - TrapId::kTrapIllegalCast); - } - - Node* maybe_match = gasm_.LoadImmutableFromObject( - MachineType::TaggedPointer(), type_info, - wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset + - kTaggedSize * rtt_depth)); - - gasm_.TrapUnless(gasm_.TaggedEqual(maybe_match, rtt), - TrapId::kTrapIllegalCast); - gasm_.Goto(&end_label); - gasm_.Bind(&end_label); ReplaceWithValue(node, object, gasm_.effect(), gasm_.control()); diff --git a/src/flags/flag-definitions.h b/src/flags/flag-definitions.h index 210357eecf..5ea43974f3 100644 --- a/src/flags/flag-definitions.h +++ b/src/flags/flag-definitions.h @@ -1173,6 +1173,8 @@ DEFINE_BOOL(trace_wasm_inlining, false, 
"trace wasm inlining") DEFINE_BOOL(trace_wasm_speculative_inlining, false, "trace wasm speculative inlining") DEFINE_BOOL(trace_wasm_typer, false, "trace wasm typer") +DEFINE_BOOL(wasm_final_types, false, + "enable final types as default for wasm-gc") DEFINE_IMPLICATION(wasm_speculative_inlining, wasm_inlining) DEFINE_WEAK_IMPLICATION(experimental_wasm_gc, wasm_speculative_inlining) diff --git a/src/wasm/baseline/liftoff-compiler.cc b/src/wasm/baseline/liftoff-compiler.cc index 58e2fb689a..f1894df85d 100644 --- a/src/wasm/baseline/liftoff-compiler.cc +++ b/src/wasm/baseline/liftoff-compiler.cc @@ -7409,7 +7409,8 @@ class LiftoffCompiler { AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapFuncSigMismatch); __ DropValues(1); - if (v8_flags.experimental_wasm_gc) { + if (v8_flags.experimental_wasm_gc && + !decoder->module_->types[imm.sig_imm.index].is_final) { Label success_label; FREEZE_STATE(frozen); __ emit_cond_jump(kEqual, &success_label, kI32, real_sig_id, diff --git a/src/wasm/canonical-types.cc b/src/wasm/canonical-types.cc index fa183c74e2..fc50d6ec54 100644 --- a/src/wasm/canonical-types.cc +++ b/src/wasm/canonical-types.cc @@ -68,7 +68,8 @@ uint32_t TypeCanonicalizer::AddRecursiveGroup(const FunctionSig* sig) { #endif CanonicalGroup group; group.types.resize(1); - group.types[0].type_def = TypeDefinition(sig, kNoSuperType); + group.types[0].type_def = + TypeDefinition(sig, kNoSuperType, v8_flags.wasm_final_types); group.types[0].is_relative_supertype = false; int canonical_index = FindCanonicalGroup(group); if (canonical_index < 0) { @@ -80,7 +81,8 @@ uint32_t TypeCanonicalizer::AddRecursiveGroup(const FunctionSig* sig) { for (auto type : sig->returns()) builder.AddReturn(type); for (auto type : sig->parameters()) builder.AddParam(type); const FunctionSig* allocated_sig = builder.Build(); - group.types[0].type_def = TypeDefinition(allocated_sig, kNoSuperType); + group.types[0].type_def = + TypeDefinition(allocated_sig, kNoSuperType, 
v8_flags.wasm_final_types); group.types[0].is_relative_supertype = false; canonical_groups_.emplace(group, canonical_index); canonical_supertypes_.emplace_back(kNoSuperType); @@ -150,7 +152,8 @@ TypeCanonicalizer::CanonicalType TypeCanonicalizer::CanonicalizeTypeDef( builder.AddParam( CanonicalizeValueType(module, param, recursive_group_start)); } - result = TypeDefinition(builder.Build(), canonical_supertype); + result = + TypeDefinition(builder.Build(), canonical_supertype, type.is_final); break; } case TypeDefinition::kStruct: { @@ -165,7 +168,7 @@ TypeCanonicalizer::CanonicalType TypeCanonicalizer::CanonicalizeTypeDef( builder.set_total_fields_size(original_type->total_fields_size()); result = TypeDefinition( builder.Build(StructType::Builder::kUseProvidedOffsets), - canonical_supertype); + canonical_supertype, type.is_final); break; } case TypeDefinition::kArray: { @@ -173,7 +176,7 @@ TypeCanonicalizer::CanonicalType TypeCanonicalizer::CanonicalizeTypeDef( module, type.array_type->element_type(), recursive_group_start); result = TypeDefinition( zone_.New(element_type, type.array_type->mutability()), - canonical_supertype); + canonical_supertype, type.is_final); break; } } diff --git a/src/wasm/canonical-types.h b/src/wasm/canonical-types.h index a404db2979..1750ebba9c 100644 --- a/src/wasm/canonical-types.h +++ b/src/wasm/canonical-types.h @@ -83,6 +83,8 @@ class TypeCanonicalizer { // TODO(manoskouk): Improve this. 
size_t hash_value() const { return base::hash_combine(base::hash_value(type_def.kind), + base::hash_value(type_def.supertype), + base::hash_value(type_def.is_final), base::hash_value(is_relative_supertype)); } }; diff --git a/src/wasm/module-decoder-impl.h b/src/wasm/module-decoder-impl.h index f709e3ccdf..c2d8c36d9c 100644 --- a/src/wasm/module-decoder-impl.h +++ b/src/wasm/module-decoder-impl.h @@ -581,15 +581,15 @@ class ModuleDecoderTemplate : public Decoder { switch (kind) { case kWasmFunctionTypeCode: { const FunctionSig* sig = consume_sig(&module_->signature_zone); - return {sig, kNoSuperType}; + return {sig, kNoSuperType, v8_flags.wasm_final_types}; } case kWasmStructTypeCode: { const StructType* type = consume_struct(&module_->signature_zone); - return {type, kNoSuperType}; + return {type, kNoSuperType, v8_flags.wasm_final_types}; } case kWasmArrayTypeCode: { const ArrayType* type = consume_array(&module_->signature_zone); - return {type, kNoSuperType}; + return {type, kNoSuperType, v8_flags.wasm_final_types}; } default: tracer_.NextLine(); @@ -601,8 +601,11 @@ class ModuleDecoderTemplate : public Decoder { TypeDefinition consume_subtype_definition() { DCHECK(enabled_features_.has_gc()); uint8_t kind = read_u8(pc(), "type kind"); - if (kind == kWasmSubtypeCode) { - consume_bytes(1, " subtype, ", tracer_); + if (kind == kWasmSubtypeCode || kind == kWasmSubtypeFinalCode) { + bool is_final = + v8_flags.wasm_final_types && kind == kWasmSubtypeFinalCode; + consume_bytes(1, is_final ? 
" subtype final, " : " subtype extensible, ", + tracer_); constexpr uint32_t kMaximumSupertypes = 1; uint32_t supertype_count = consume_count("supertype count", kMaximumSupertypes); @@ -621,6 +624,7 @@ class ModuleDecoderTemplate : public Decoder { } TypeDefinition type = consume_base_type_definition(); type.supertype = supertype; + type.is_final = is_final; return type; } else { return consume_base_type_definition(); @@ -650,13 +654,14 @@ class ModuleDecoderTemplate : public Decoder { consume_bytes(1, "function"); const FunctionSig* sig = consume_sig(&module_->signature_zone); if (!ok()) break; - module_->types[i] = {sig, kNoSuperType}; + module_->types[i] = {sig, kNoSuperType, v8_flags.wasm_final_types}; type_canon->AddRecursiveGroup(module_.get(), 1, i); break; } case kWasmArrayTypeCode: case kWasmStructTypeCode: case kWasmSubtypeCode: + case kWasmSubtypeFinalCode: case kWasmRecursiveTypeGroupCode: errorf( "Unknown type code 0x%02x, enable with --experimental-wasm-gc", @@ -726,6 +731,12 @@ class ModuleDecoderTemplate : public Decoder { errorf("type %u: subtyping depth is greater than allowed", i); continue; } + // This check is technically redundant; we include for the improved error + // message. 
+ if (module->types[explicit_super].is_final) { + errorf("type %u extends final type %u", i, explicit_super); + continue; + } if (!ValidSubtypeDefinition(i, explicit_super, module, module)) { errorf("type %u has invalid explicit supertype %u", i, explicit_super); continue; diff --git a/src/wasm/wasm-constants.h b/src/wasm/wasm-constants.h index 8d40b4646f..ef0d7f44ff 100644 --- a/src/wasm/wasm-constants.h +++ b/src/wasm/wasm-constants.h @@ -62,6 +62,7 @@ constexpr uint8_t kWasmFunctionTypeCode = 0x60; constexpr uint8_t kWasmStructTypeCode = 0x5f; constexpr uint8_t kWasmArrayTypeCode = 0x5e; constexpr uint8_t kWasmSubtypeCode = 0x50; +constexpr uint8_t kWasmSubtypeFinalCode = 0x4e; constexpr uint8_t kWasmRecursiveTypeGroupCode = 0x4f; // Binary encoding of import/export kinds. diff --git a/src/wasm/wasm-module-builder.cc b/src/wasm/wasm-module-builder.cc index 0e485bd09b..b3fbdfd171 100644 --- a/src/wasm/wasm-module-builder.cc +++ b/src/wasm/wasm-module-builder.cc @@ -57,7 +57,7 @@ void WasmFunctionBuilder::EmitU32V(uint32_t val) { body_.write_u32v(val); } void WasmFunctionBuilder::SetSignature(const FunctionSig* sig) { DCHECK(!locals_.has_sig()); locals_.set_sig(sig); - signature_index_ = builder_->AddSignature(sig); + signature_index_ = builder_->AddSignature(sig, true); } void WasmFunctionBuilder::SetSignature(uint32_t sig_index) { @@ -305,38 +305,40 @@ void WasmModuleBuilder::AddDataSegment(const byte* data, uint32_t size, } uint32_t WasmModuleBuilder::ForceAddSignature(const FunctionSig* sig, + bool is_final, uint32_t supertype) { uint32_t index = static_cast(types_.size()); signature_map_.emplace(*sig, index); - types_.emplace_back(sig, supertype); + types_.emplace_back(sig, supertype, is_final); return index; } -uint32_t WasmModuleBuilder::AddSignature(const FunctionSig* sig, +uint32_t WasmModuleBuilder::AddSignature(const FunctionSig* sig, bool is_final, uint32_t supertype) { auto sig_entry = signature_map_.find(*sig); if (sig_entry != signature_map_.end()) 
return sig_entry->second; - return ForceAddSignature(sig, supertype); + return ForceAddSignature(sig, is_final, supertype); } uint32_t WasmModuleBuilder::AddException(const FunctionSig* type) { DCHECK_EQ(0, type->return_count()); - int type_index = AddSignature(type); + int type_index = AddSignature(type, true); uint32_t except_index = static_cast(exceptions_.size()); exceptions_.push_back(type_index); return except_index; } -uint32_t WasmModuleBuilder::AddStructType(StructType* type, +uint32_t WasmModuleBuilder::AddStructType(StructType* type, bool is_final, uint32_t supertype) { uint32_t index = static_cast(types_.size()); - types_.emplace_back(type, supertype); + types_.emplace_back(type, supertype, is_final); return index; } -uint32_t WasmModuleBuilder::AddArrayType(ArrayType* type, uint32_t supertype) { +uint32_t WasmModuleBuilder::AddArrayType(ArrayType* type, bool is_final, + uint32_t supertype) { uint32_t index = static_cast(types_.size()); - types_.emplace_back(type, supertype); + types_.emplace_back(type, supertype, is_final); return index; } @@ -390,7 +392,7 @@ uint32_t WasmModuleBuilder::AddImport(base::Vector name, FunctionSig* sig, base::Vector module) { DCHECK(adding_imports_allowed_); - function_imports_.push_back({module, name, AddSignature(sig)}); + function_imports_.push_back({module, name, AddSignature(sig, true)}); return static_cast(function_imports_.size() - 1); } @@ -613,9 +615,13 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const { const TypeDefinition& type = types_[i]; if (type.supertype != kNoSuperType) { - buffer->write_u8(kWasmSubtypeCode); - buffer->write_u8(1); // The supertype count is always 1. + buffer->write_u8(type.is_final ? 
kWasmSubtypeFinalCode + : kWasmSubtypeCode); + buffer->write_u8(1); buffer->write_u32v(type.supertype); + } else if (!type.is_final) { + buffer->write_u8(kWasmSubtypeCode); + buffer->write_u8(0); } switch (type.kind) { case TypeDefinition::kFunction: { diff --git a/src/wasm/wasm-module-builder.h b/src/wasm/wasm-module-builder.h index 09c0b68c92..dbf29bd429 100644 --- a/src/wasm/wasm-module-builder.h +++ b/src/wasm/wasm-module-builder.h @@ -334,14 +334,16 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject { // exceeded. uint32_t IncreaseTableMinSize(uint32_t table_index, uint32_t count); // Adds the signature to the module if it does not already exist. - uint32_t AddSignature(const FunctionSig* sig, + uint32_t AddSignature(const FunctionSig* sig, bool is_final, uint32_t supertype = kNoSuperType); // Does not deduplicate function signatures. - uint32_t ForceAddSignature(const FunctionSig* sig, + uint32_t ForceAddSignature(const FunctionSig* sig, bool is_final, uint32_t supertype = kNoSuperType); uint32_t AddException(const FunctionSig* type); - uint32_t AddStructType(StructType* type, uint32_t supertype = kNoSuperType); - uint32_t AddArrayType(ArrayType* type, uint32_t supertype = kNoSuperType); + uint32_t AddStructType(StructType* type, bool is_final, + uint32_t supertype = kNoSuperType); + uint32_t AddArrayType(ArrayType* type, bool is_final, + uint32_t supertype = kNoSuperType); uint32_t AddTable(ValueType type, uint32_t min_size); uint32_t AddTable(ValueType type, uint32_t min_size, uint32_t max_size); uint32_t AddTable(ValueType type, uint32_t min_size, uint32_t max_size, diff --git a/src/wasm/wasm-module.h b/src/wasm/wasm-module.h index afc91db194..a6a6094cf5 100644 --- a/src/wasm/wasm-module.h +++ b/src/wasm/wasm-module.h @@ -334,23 +334,30 @@ constexpr uint32_t kNoSuperType = std::numeric_limits::max(); struct TypeDefinition { enum Kind { kFunction, kStruct, kArray }; - TypeDefinition(const FunctionSig* sig, uint32_t supertype) - : 
function_sig(sig), supertype(supertype), kind(kFunction) {} - TypeDefinition(const StructType* type, uint32_t supertype) - : struct_type(type), supertype(supertype), kind(kStruct) {} - TypeDefinition(const ArrayType* type, uint32_t supertype) - : array_type(type), supertype(supertype), kind(kArray) {} + TypeDefinition(const FunctionSig* sig, uint32_t supertype, bool is_final) + : function_sig(sig), + supertype(supertype), + kind(kFunction), + is_final(is_final) {} + TypeDefinition(const StructType* type, uint32_t supertype, bool is_final) + : struct_type(type), + supertype(supertype), + kind(kStruct), + is_final(is_final) {} + TypeDefinition(const ArrayType* type, uint32_t supertype, bool is_final) + : array_type(type), + supertype(supertype), + kind(kArray), + is_final(is_final) {} TypeDefinition() - : function_sig(nullptr), supertype(kNoSuperType), kind(kFunction) {} - - union { - const FunctionSig* function_sig; - const StructType* struct_type; - const ArrayType* array_type; - }; + : function_sig(nullptr), + supertype(kNoSuperType), + kind(kFunction), + is_final(false) {} bool operator==(const TypeDefinition& other) const { - if (supertype != other.supertype || kind != other.kind) { + if (supertype != other.supertype || kind != other.kind || + is_final != other.is_final) { return false; } switch (kind) { @@ -367,8 +374,14 @@ struct TypeDefinition { return !(*this == other); } + union { + const FunctionSig* function_sig; + const StructType* struct_type; + const ArrayType* array_type; + }; uint32_t supertype; Kind kind; + bool is_final; }; struct V8_EXPORT_PRIVATE WasmDebugSymbols { @@ -558,9 +571,10 @@ struct V8_EXPORT_PRIVATE WasmModule { bool has_type(uint32_t index) const { return index < types.size(); } - void add_signature(const FunctionSig* sig, uint32_t supertype) { + void add_signature(const FunctionSig* sig, uint32_t supertype, + bool is_final) { DCHECK_NOT_NULL(sig); - add_type(TypeDefinition(sig, supertype)); + add_type(TypeDefinition(sig, supertype, 
is_final)); } bool has_signature(uint32_t index) const { return index < types.size() && @@ -571,9 +585,10 @@ struct V8_EXPORT_PRIVATE WasmModule { return types[index].function_sig; } - void add_struct_type(const StructType* type, uint32_t supertype) { + void add_struct_type(const StructType* type, uint32_t supertype, + bool is_final) { DCHECK_NOT_NULL(type); - add_type(TypeDefinition(type, supertype)); + add_type(TypeDefinition(type, supertype, is_final)); } bool has_struct(uint32_t index) const { return index < types.size() && types[index].kind == TypeDefinition::kStruct; @@ -583,9 +598,10 @@ struct V8_EXPORT_PRIVATE WasmModule { return types[index].struct_type; } - void add_array_type(const ArrayType* type, uint32_t supertype) { + void add_array_type(const ArrayType* type, uint32_t supertype, + bool is_final) { DCHECK_NOT_NULL(type); - add_type(TypeDefinition(type, supertype)); + add_type(TypeDefinition(type, supertype, is_final)); } bool has_array(uint32_t index) const { return index < types.size() && types[index].kind == TypeDefinition::kArray; diff --git a/src/wasm/wasm-subtyping.cc b/src/wasm/wasm-subtyping.cc index 0b74af4c70..0945a5f96f 100644 --- a/src/wasm/wasm-subtyping.cc +++ b/src/wasm/wasm-subtyping.cc @@ -141,10 +141,11 @@ bool IsNullSentinel(HeapType type) { bool ValidSubtypeDefinition(uint32_t subtype_index, uint32_t supertype_index, const WasmModule* sub_module, const WasmModule* super_module) { - TypeDefinition::Kind sub_kind = sub_module->types[subtype_index].kind; - TypeDefinition::Kind super_kind = super_module->types[supertype_index].kind; - if (sub_kind != super_kind) return false; - switch (sub_kind) { + const TypeDefinition& subtype = sub_module->types[subtype_index]; + const TypeDefinition& supertype = super_module->types[supertype_index]; + if (subtype.kind != supertype.kind) return false; + if (supertype.is_final) return false; + switch (subtype.kind) { case TypeDefinition::kFunction: return ValidFunctionSubtypeDefinition(subtype_index, 
supertype_index, sub_module, super_module); diff --git a/test/cctest/wasm/test-gc.cc b/test/cctest/wasm/test-gc.cc index 864c4dac23..96a4d25ed9 100644 --- a/test/cctest/wasm/test-gc.cc +++ b/test/cctest/wasm/test-gc.cc @@ -81,23 +81,24 @@ class WasmGCTester { } byte DefineStruct(std::initializer_list fields, - uint32_t supertype = kNoSuperType) { + uint32_t supertype = kNoSuperType, bool is_final = false) { StructType::Builder type_builder(&zone_, static_cast(fields.size())); for (F field : fields) { type_builder.AddField(field.first, field.second); } - return builder_.AddStructType(type_builder.Build(), supertype); + return builder_.AddStructType(type_builder.Build(), is_final, supertype); } byte DefineArray(ValueType element_type, bool mutability, - uint32_t supertype = kNoSuperType) { + uint32_t supertype = kNoSuperType, bool is_final = false) { return builder_.AddArrayType(zone_.New(element_type, mutability), - supertype); + is_final, supertype); } - byte DefineSignature(FunctionSig* sig, uint32_t supertype = kNoSuperType) { - return builder_.ForceAddSignature(sig, supertype); + byte DefineSignature(FunctionSig* sig, uint32_t supertype = kNoSuperType, + bool is_final = false) { + return builder_.ForceAddSignature(sig, is_final, supertype); } byte DefineTable(ValueType type, uint32_t min_size, uint32_t max_size) { diff --git a/test/cctest/wasm/wasm-run-utils.h b/test/cctest/wasm/wasm-run-utils.h index 319cd7fd28..fef9e16c2f 100644 --- a/test/cctest/wasm/wasm-run-utils.h +++ b/test/cctest/wasm/wasm-run-utils.h @@ -132,8 +132,9 @@ class TestingModuleBuilder { return reinterpret_cast(globals_data_ + global->offset); } + // TODO(7748): Allow selecting type finality. 
byte AddSignature(const FunctionSig* sig) { - test_module_->add_signature(sig, kNoSuperType); + test_module_->add_signature(sig, kNoSuperType, v8_flags.wasm_final_types); GetTypeCanonicalizer()->AddRecursiveGroup(test_module_.get(), 1); instance_object_->set_isorecursive_canonical_types( test_module_->isorecursive_canonical_type_ids.data()); diff --git a/test/fuzzer/wasm-compile.cc b/test/fuzzer/wasm-compile.cc index c6bbdaf061..7525e56370 100644 --- a/test/fuzzer/wasm-compile.cc +++ b/test/fuzzer/wasm-compile.cc @@ -192,7 +192,8 @@ class WasmGenerator { builder.AddReturn(type); } FunctionSig* sig = builder.Build(); - int sig_id = gen->builder_->builder()->AddSignature(sig); + int sig_id = gen->builder_->builder()->AddSignature( + sig, v8_flags.wasm_final_types); gen->builder_->EmitI32V(sig_id); } @@ -2428,7 +2429,7 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer { struct_builder.AddField(type, mutability); } StructType* struct_fuz = struct_builder.Build(); - builder.AddStructType(struct_fuz); + builder.AddStructType(struct_fuz, false); } for (int array_index = 0; array_index < num_arrays; array_index++) { @@ -2436,18 +2437,20 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer { &range, builder.NumTypes(), builder.NumTypes(), kAllowNonNullables, kIncludePackedTypes, kIncludeGenerics); ArrayType* array_fuz = zone->New(type, true); - builder.AddArrayType(array_fuz); + builder.AddArrayType(array_fuz, false); } // We keep the signature for the first (main) function constant. 
- function_signatures.push_back(builder.ForceAddSignature(sigs.i_iii())); + function_signatures.push_back( + builder.ForceAddSignature(sigs.i_iii(), v8_flags.wasm_final_types)); - for (uint8_t i = 1; i < num_functions; i++) { - FunctionSig* sig = - GenerateSig(zone, &range, kFunctionSig, builder.NumTypes()); - uint32_t signature_index = builder.ForceAddSignature(sig); - function_signatures.push_back(signature_index); - } + for (uint8_t i = 1; i < num_functions; i++) { + FunctionSig* sig = + GenerateSig(zone, &range, kFunctionSig, builder.NumTypes()); + uint32_t signature_index = + builder.ForceAddSignature(sig, v8_flags.wasm_final_types); + function_signatures.push_back(signature_index); + } int num_exceptions = 1 + (range.get() % kMaxExceptions); for (int i = 0; i < num_exceptions; ++i) { diff --git a/test/inspector/debugger/wasm-gc-anyref-expected.txt b/test/inspector/debugger/wasm-gc-anyref-expected.txt index 30bbccd2ce..582a98053f 100644 --- a/test/inspector/debugger/wasm-gc-anyref-expected.txt +++ b/test/inspector/debugger/wasm-gc-anyref-expected.txt @@ -8,14 +8,14 @@ Module instantiated. Tables populated. 
Setting breakpoint { - columnNumber : 260 + columnNumber : 264 lineNumber : 0 scriptId : } Paused: -Script wasm://wasm/8fd0ec76 byte offset 260: Wasm opcode 0x01 (kExprNop) +Script wasm://wasm/19fa3802 byte offset 264: Wasm opcode 0x01 (kExprNop) Scope: -at $main (0:260): +at $main (0:264): - scope (wasm-expression-stack): stack: - scope (local): diff --git a/test/mjsunit/wasm/call-ref.js b/test/mjsunit/wasm/call-ref.js index 23d118ee7c..9880f0ef6d 100644 --- a/test/mjsunit/wasm/call-ref.js +++ b/test/mjsunit/wasm/call-ref.js @@ -32,7 +32,6 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js"); var imported_wasm_function_index = builder.addImport("imports", "wasm_add", sig_index); - var locally_defined_function = builder.addFunction("sub", sig_index) .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Sub]) @@ -146,7 +145,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js"); let sub_struct = builder.addStruct( [makeField(kWasmI32, true), makeField(kWasmI64, true)], super_struct); let super_sig = builder.addType(makeSig([wasmRefNullType(sub_struct)], - [kWasmI32])) + [kWasmI32]), kNoSuperType, false) let sub_sig = builder.addType(makeSig([wasmRefNullType(super_struct)], [kWasmI32]), super_sig) @@ -162,8 +161,8 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js"); let super_struct = builder.addStruct([makeField(kWasmI32, true)]); let sub_struct = builder.addStruct( [makeField(kWasmI32, true), makeField(kWasmI64, true)], super_struct); - let super_sig = builder.addType(makeSig([wasmRefNullType(sub_struct)], - [kWasmI32])) + let super_sig = builder.addType( + makeSig([wasmRefNullType(sub_struct)], [kWasmI32]), kNoSuperType, false); builder.addImport("m", "f", super_sig); // Import is a function of the declared type. 
@@ -189,7 +188,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js"); (function () { var builder = new WasmModuleBuilder(); - let sig = builder.addType(kSig_i_i); + let sig = builder.addType(kSig_i_i, kNoSuperType, false); let sig_sub = builder.addType(kSig_i_i, sig); builder.addImport("m", "f", sig_sub); diff --git a/test/mjsunit/wasm/gc-casts-subtypes.js b/test/mjsunit/wasm/gc-casts-subtypes.js index 0023eef604..8c674bfed1 100644 --- a/test/mjsunit/wasm/gc-casts-subtypes.js +++ b/test/mjsunit/wasm/gc-casts-subtypes.js @@ -84,7 +84,7 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js'); (function RefTestFuncRef() { print(arguments.callee.name); let builder = new WasmModuleBuilder(); - let sigSuper = builder.addType(makeSig([kWasmI32], [])); + let sigSuper = builder.addType(makeSig([kWasmI32], []), kNoSuperType, false); let sigSub = builder.addType(makeSig([kWasmI32], []), sigSuper); builder.addFunction('fctSuper', sigSuper).addBody([]).exportFunc(); @@ -125,7 +125,7 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js'); (function RefCastFuncRef() { print(arguments.callee.name); let builder = new WasmModuleBuilder(); - let sigSuper = builder.addType(makeSig([kWasmI32], [])); + let sigSuper = builder.addType(makeSig([kWasmI32], []), kNoSuperType, false); let sigSub = builder.addType(makeSig([kWasmI32], []), sigSuper); builder.addFunction('fctSuper', sigSuper).addBody([]).exportFunc(); @@ -206,7 +206,7 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js'); (function BrOnCastFuncRef() { print(arguments.callee.name); let builder = new WasmModuleBuilder(); - let sigSuper = builder.addType(makeSig([kWasmI32], [])); + let sigSuper = builder.addType(makeSig([kWasmI32], []), kNoSuperType, false); let sigSub = builder.addType(makeSig([kWasmI32], []), sigSuper); builder.addFunction('fctSuper', sigSuper).addBody([]).exportFunc(); diff --git a/test/mjsunit/wasm/wasm-module-builder.js b/test/mjsunit/wasm/wasm-module-builder.js index 
1a0dcbfe7a..3701198936 100644 --- a/test/mjsunit/wasm/wasm-module-builder.js +++ b/test/mjsunit/wasm/wasm-module-builder.js @@ -80,6 +80,7 @@ let kWasmFunctionTypeForm = 0x60; let kWasmStructTypeForm = 0x5f; let kWasmArrayTypeForm = 0x5e; let kWasmSubtypeForm = 0x50; +let kWasmSubtypeFinalForm = 0x4e; let kWasmRecursiveTypeGroupForm = 0x4f; let kNoSuperType = 0xFFFFFFFF; @@ -1207,21 +1208,23 @@ function makeField(type, mutability) { } class WasmStruct { - constructor(fields, supertype_idx) { + constructor(fields, is_final, supertype_idx) { if (!Array.isArray(fields)) { throw new Error('struct fields must be an array'); } this.fields = fields; this.type_form = kWasmStructTypeForm; + this.is_final = is_final; this.supertype = supertype_idx; } } class WasmArray { - constructor(type, mutability, supertype_idx) { + constructor(type, mutability, is_final, supertype_idx) { this.type = type; this.mutability = mutability; this.type_form = kWasmArrayTypeForm; + this.is_final = is_final; this.supertype = supertype_idx; } } @@ -1338,11 +1341,13 @@ class WasmModuleBuilder { this.explicit.push(this.createCustomSection(name, bytes)); } - addType(type, supertype_idx = kNoSuperType) { + // We use {is_final = true} so that the MVP syntax is generated for + // signatures. 
+ addType(type, supertype_idx = kNoSuperType, is_final = true) { var pl = type.params.length; // should have params var rl = type.results.length; // should have results var type_copy = {params: type.params, results: type.results, - supertype: supertype_idx}; + is_final: is_final, supertype: supertype_idx}; this.types.push(type_copy); return this.types.length - 1; } @@ -1352,13 +1357,13 @@ class WasmModuleBuilder { return this.stringrefs.length - 1; } - addStruct(fields, supertype_idx = kNoSuperType) { - this.types.push(new WasmStruct(fields, supertype_idx)); + addStruct(fields, supertype_idx = kNoSuperType, is_final = false) { + this.types.push(new WasmStruct(fields, is_final, supertype_idx)); return this.types.length - 1; } - addArray(type, mutability, supertype_idx = kNoSuperType) { - this.types.push(new WasmArray(type, mutability, supertype_idx)); + addArray(type, mutability, supertype_idx = kNoSuperType, is_final = false) { + this.types.push(new WasmArray(type, mutability, is_final, supertype_idx)); return this.types.length - 1; } @@ -1654,9 +1659,13 @@ class WasmModuleBuilder { let type = wasm.types[i]; if (type.supertype != kNoSuperType) { - section.emit_u8(kWasmSubtypeForm); + section.emit_u8(type.is_final ? 
kWasmSubtypeFinalForm + : kWasmSubtypeForm); section.emit_u8(1); // supertype count section.emit_u32v(type.supertype); + } else if (!type.is_final) { + section.emit_u8(kWasmSubtypeForm); + section.emit_u8(0); // no supertypes } if (type instanceof WasmStruct) { section.emit_u8(kWasmStructTypeForm); diff --git a/test/unittests/wasm/function-body-decoder-unittest.cc b/test/unittests/wasm/function-body-decoder-unittest.cc index 1d44be21aa..7c74c6a72e 100644 --- a/test/unittests/wasm/function-body-decoder-unittest.cc +++ b/test/unittests/wasm/function-body-decoder-unittest.cc @@ -87,7 +87,7 @@ class TestModuleBuilder { return static_cast(mod.globals.size() - 1); } byte AddSignature(const FunctionSig* sig, uint32_t supertype = kNoSuperType) { - mod.add_signature(sig, supertype); + mod.add_signature(sig, supertype, v8_flags.wasm_final_types); CHECK_LE(mod.types.size(), kMaxByteSizedLeb128); GetTypeCanonicalizer()->AddRecursiveGroup(module(), 1); return static_cast(mod.types.size() - 1); @@ -131,14 +131,15 @@ class TestModuleBuilder { for (F field : fields) { type_builder.AddField(field.first, field.second); } - mod.add_struct_type(type_builder.Build(), supertype); + mod.add_struct_type(type_builder.Build(), supertype, + v8_flags.wasm_final_types); GetTypeCanonicalizer()->AddRecursiveGroup(module(), 1); return static_cast(mod.types.size() - 1); } byte AddArray(ValueType type, bool mutability) { ArrayType* array = mod.signature_zone.New(type, mutability); - mod.add_array_type(array, kNoSuperType); + mod.add_array_type(array, kNoSuperType, v8_flags.wasm_final_types); GetTypeCanonicalizer()->AddRecursiveGroup(module(), 1); return static_cast(mod.types.size() - 1); } diff --git a/test/unittests/wasm/module-decoder-unittest.cc b/test/unittests/wasm/module-decoder-unittest.cc index e06d86c923..37b7c405a1 100644 --- a/test/unittests/wasm/module-decoder-unittest.cc +++ b/test/unittests/wasm/module-decoder-unittest.cc @@ -1058,8 +1058,9 @@ TEST_F(WasmModuleVerifyTest, 
InvalidSupertypeInRecGroup) { static const byte invalid_supertype[] = { SECTION(Type, ENTRY_COUNT(1), // -- kWasmRecursiveTypeGroupCode, ENTRY_COUNT(2), // -- - kWasmArrayTypeCode, kI32Code, 0, // -- - kWasmSubtypeCode, 1, 0, // supertype count, supertype + kWasmSubtypeCode, 0, // 0 supertypes, non-final + kWasmArrayTypeCode, kI32Code, 0, // -- + kWasmSubtypeCode, 1, 0, // supertype count, supertype kWasmArrayTypeCode, kI64Code, 0)}; EXPECT_FAILURE_WITH_MSG(invalid_supertype, @@ -1091,6 +1092,31 @@ TEST_F(WasmModuleVerifyTest, NoSupertypeSupertype) { no_supertype, "is greater than the maximum number of type definitions"); } +TEST_F(WasmModuleVerifyTest, NonSpecifiedFinalType) { + WASM_FEATURE_SCOPE(typed_funcref); + WASM_FEATURE_SCOPE(gc); + FLAG_SCOPE(wasm_final_types); + static const byte final_supertype[] = { + SECTION(Type, ENTRY_COUNT(2), // -- + kWasmStructTypeCode, 1, kI32Code, 1, // -- + kWasmSubtypeCode, 1, 0, // -- + kWasmStructTypeCode, 2, kI32Code, 1, kI32Code, 1)}; + EXPECT_FAILURE_WITH_MSG(final_supertype, "type 1 extends final type 0"); +} + +TEST_F(WasmModuleVerifyTest, SpecifiedFinalType) { + WASM_FEATURE_SCOPE(typed_funcref); + WASM_FEATURE_SCOPE(gc); + FLAG_SCOPE(wasm_final_types); + static const byte final_supertype[] = { + SECTION(Type, ENTRY_COUNT(2), // -- + kWasmSubtypeFinalCode, 0, // -- + kWasmStructTypeCode, 1, kI32Code, 1, // -- + kWasmSubtypeCode, 1, 0, // -- + kWasmStructTypeCode, 2, kI32Code, 1, kI32Code, 1)}; + EXPECT_FAILURE_WITH_MSG(final_supertype, "type 1 extends final type 0"); +} + TEST_F(WasmModuleVerifyTest, ZeroExceptions) { static const byte data[] = {SECTION(Tag, ENTRY_COUNT(0))}; ModuleResult result = DecodeModule(base::ArrayVector(data)); diff --git a/test/unittests/wasm/subtyping-unittest.cc b/test/unittests/wasm/subtyping-unittest.cc index 75e59df3bf..fd3681ee73 100644 --- a/test/unittests/wasm/subtyping-unittest.cc +++ b/test/unittests/wasm/subtyping-unittest.cc @@ -25,25 +25,25 @@ FieldInit mut(ValueType type) { 
return FieldInit(type, true); } FieldInit immut(ValueType type) { return FieldInit(type, false); } void DefineStruct(WasmModule* module, std::initializer_list fields, - uint32_t supertype = kNoSuperType, + uint32_t supertype = kNoSuperType, bool is_final = false, bool in_singleton_rec_group = true) { StructType::Builder builder(&module->signature_zone, static_cast(fields.size())); for (FieldInit field : fields) { builder.AddField(field.first, field.second); } - module->add_struct_type(builder.Build(), supertype); + module->add_struct_type(builder.Build(), supertype, is_final); if (in_singleton_rec_group) { GetTypeCanonicalizer()->AddRecursiveGroup(module, 1); } } void DefineArray(WasmModule* module, FieldInit element_type, - uint32_t supertype = kNoSuperType, + uint32_t supertype = kNoSuperType, bool is_final = false, bool in_singleton_rec_group = true) { module->add_array_type(module->signature_zone.New( element_type.first, element_type.second), - supertype); + supertype, is_final); if (in_singleton_rec_group) { GetTypeCanonicalizer()->AddRecursiveGroup(module, 1); } @@ -52,10 +52,11 @@ void DefineArray(WasmModule* module, FieldInit element_type, void DefineSignature(WasmModule* module, std::initializer_list params, std::initializer_list returns, - uint32_t supertype = kNoSuperType, + uint32_t supertype = kNoSuperType, bool is_final = false, bool in_singleton_rec_group = true) { module->add_signature( - FunctionSig::Build(&module->signature_zone, returns, params), supertype); + FunctionSig::Build(&module->signature_zone, returns, params), supertype, + is_final); if (in_singleton_rec_group) { GetTypeCanonicalizer()->AddRecursiveGroup(module, 1); } @@ -96,34 +97,43 @@ TEST_F(WasmSubtypingTest, Subtyping) { // Rec. group. 
/* 18 */ DefineStruct(module, {mut(kWasmI32), immut(refNull(17))}, 17, false); - /* 19 */ DefineArray(module, {mut(refNull(21))}, kNoSuperType, false); + /* 19 */ DefineArray(module, {mut(refNull(21))}, kNoSuperType, false, + false); /* 20 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, kNoSuperType, - false); - /* 21 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, 20, false); + false, false); + /* 21 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, 20, false, false); GetTypeCanonicalizer()->AddRecursiveGroup(module, 4); // Identical rec. group. /* 22 */ DefineStruct(module, {mut(kWasmI32), immut(refNull(17))}, 17, - false); - /* 23 */ DefineArray(module, {mut(refNull(25))}, kNoSuperType, false); + false, false); + /* 23 */ DefineArray(module, {mut(refNull(25))}, kNoSuperType, false, + false); /* 24 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, kNoSuperType, - false); - /* 25 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, 24, false); + false, false); + /* 25 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, 24, false, false); GetTypeCanonicalizer()->AddRecursiveGroup(module, 4); // Nonidentical rec. group: the last function extends a type outside the // recursive group. 
/* 26 */ DefineStruct(module, {mut(kWasmI32), immut(refNull(17))}, 17, - false); - /* 27 */ DefineArray(module, {mut(refNull(29))}, kNoSuperType, false); + false, false); + /* 27 */ DefineArray(module, {mut(refNull(29))}, kNoSuperType, false, + false); /* 28 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, kNoSuperType, - false); - /* 29 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, 20, false); + false, false); + /* 29 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, 20, false, false); GetTypeCanonicalizer()->AddRecursiveGroup(module, 4); /* 30 */ DefineStruct(module, {mut(kWasmI32), immut(refNull(18))}, 18); /* 31 */ DefineStruct( module, {mut(ref(2)), immut(refNull(2)), immut(kWasmS128)}, 1); + + // Final types + /* 32 */ DefineStruct(module, {mut(kWasmI32)}, kNoSuperType, true); + /* 33 */ DefineStruct(module, {mut(kWasmI32), mut(kWasmI64)}, 32, true); + /* 34 */ DefineStruct(module, {mut(kWasmI32)}, kNoSuperType, true); + /* 35 */ DefineStruct(module, {mut(kWasmI32)}, kNoSuperType, false); } constexpr ValueType numeric_types[] = {kWasmI32, kWasmI64, kWasmF32, kWasmF64, @@ -305,32 +315,38 @@ TEST_F(WasmSubtypingTest, Subtyping) { VALID_SUBTYPE(ref(10), ref(10)); VALID_SUBTYPE(ref(11), ref(11)); - { - // Canonicalization tests. + // Canonicalization tests. - // Groups should only be canonicalized to identical groups. - IDENTICAL(18, 22); - IDENTICAL(19, 23); - IDENTICAL(20, 24); - IDENTICAL(21, 25); + // Groups should only be canonicalized to identical groups. + IDENTICAL(18, 22); + IDENTICAL(19, 23); + IDENTICAL(20, 24); + IDENTICAL(21, 25); - DISTINCT(18, 26); - DISTINCT(19, 27); - DISTINCT(20, 28); - DISTINCT(21, 29); + DISTINCT(18, 26); + DISTINCT(19, 27); + DISTINCT(20, 28); + DISTINCT(21, 29); - // A type should not be canonicalized to an identical one with a different - // group structure. - DISTINCT(18, 17); + // A type should not be canonicalized to an identical one with a different + // group structure. 
+ DISTINCT(18, 17); - // A subtype should also be subtype of an equivalent type. - VALID_SUBTYPE(ref(30), ref(18)); - VALID_SUBTYPE(ref(30), ref(22)); - NOT_SUBTYPE(ref(30), ref(26)); + // A subtype should also be subtype of an equivalent type. + VALID_SUBTYPE(ref(30), ref(18)); + VALID_SUBTYPE(ref(30), ref(22)); + NOT_SUBTYPE(ref(30), ref(26)); - // Rtts of identical types are subtype-related. - SUBTYPE(ValueType::Rtt(8), ValueType::Rtt(17)); - } + // Final types + + // A type is not a valid subtype of a final type. + NOT_VALID_SUBTYPE(ref(33), ref(32)); + IDENTICAL(32, 34); + // A final and a non-final + DISTINCT(32, 35); + + // Rtts of identical types are subtype-related. + SUBTYPE(ValueType::Rtt(8), ValueType::Rtt(17)); // Unions and intersections. diff --git a/test/unittests/wasm/wasm-disassembler-unittest-gc.wasm.inc b/test/unittests/wasm/wasm-disassembler-unittest-gc.wasm.inc index 67eac63803..01fdedfd1b 100644 --- a/test/unittests/wasm/wasm-disassembler-unittest-gc.wasm.inc +++ b/test/unittests/wasm/wasm-disassembler-unittest-gc.wasm.inc @@ -2,9 +2,9 @@ 0x01, 0x00, 0x00, 0x00, // wasm version 0x01, // section kind: Type - 0x3a, // section length 58 + 0x3c, // section length 60 0x0b, // types count 11 - 0x5f, 0x00, // type #0 $type0 kind: struct, field count 0 + 0x50, 0x00, 0x5f, 0x00, // type #0 $type0 subtype, supertype count 0, kind: struct, field count 0 0x5f, 0x01, 0x7f, 0x00, // type #1 $type1 kind: struct, field count 1: i32 immutable 0x5f, 0x02, // type #2 $type2 kind: struct, field count 2 0x7f, 0x01, // i32 mutable diff --git a/test/wasm-api-tests/table.cc b/test/wasm-api-tests/table.cc index c28dcce4a7..4425a678f0 100644 --- a/test/wasm-api-tests/table.cc +++ b/test/wasm-api-tests/table.cc @@ -39,7 +39,7 @@ void ExpectResult(int expected, const Func* func, int arg1, int arg2) { TEST_F(WasmCapiTest, Table) { const uint32_t table_index = builder()->AddTable(kWasmFuncRef, 2, 10); builder()->AddExport(base::CStrVector("table"), kExternalTable, 
table_index); - const uint32_t sig_i_i_index = builder()->AddSignature(wasm_i_i_sig()); + const uint32_t sig_i_i_index = builder()->AddSignature(wasm_i_i_sig(), true); ValueType reps[] = {kWasmI32, kWasmI32, kWasmI32}; FunctionSig call_sig(1, 2, reps); byte call_code[] = { From 57b17e0cf207d3ddc51f6aa529e1cb6858f11c06 Mon Sep 17 00:00:00 2001 From: Manos Koukoutos Date: Tue, 20 Dec 2022 13:49:54 +0100 Subject: [PATCH 005/654] [wasm-gc] Add TypeGuards when optimizing away type casts This way we do not lose type information while optimizing. Bug: v8:7748 Change-Id: I5c25a3863e4400f2f69431e73b30fd7c0e626a27 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4117071 Reviewed-by: Matthias Liedtke Commit-Queue: Matthias Liedtke Auto-Submit: Manos Koukoutos Cr-Commit-Position: refs/heads/main@{#84956} --- src/compiler/wasm-gc-operator-reducer.cc | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/src/compiler/wasm-gc-operator-reducer.cc b/src/compiler/wasm-gc-operator-reducer.cc index aca45865de..93b2c34a8e 100644 --- a/src/compiler/wasm-gc-operator-reducer.cc +++ b/src/compiler/wasm-gc-operator-reducer.cc @@ -202,9 +202,12 @@ Reduction WasmGCOperatorReducer::ReduceAssertNotNull(Node* node) { // Optimize the check away if the argument is known to be non-null. if (object_type.type.is_non_nullable()) { - ReplaceWithValue(node, object); - node->Kill(); - return Replace(object); + // First, relax control. + ReplaceWithValue(node, node, node, control); + // Use a TypeGuard node to not lose any type information. + NodeProperties::ChangeOp( + node, common()->TypeGuard(NodeProperties::GetType(node))); + return Changed(node); } object_type.type = object_type.type.AsNonNull(); @@ -259,10 +262,15 @@ Reduction WasmGCOperatorReducer::ReduceWasmTypeCast(Node* node) { wasm::HeapType(rtt_type.type.ref_index()), object_type.module, rtt_type.module)) { if (to_nullable) { - // Type cast will always succeed. Remove it. 
- ReplaceWithValue(node, object); - node->Kill(); - return Replace(object); + // Type cast will always succeed. Turn it into a TypeGuard to not lose any + // type information. + // First, relax control. + ReplaceWithValue(node, node, node, control); + // Remove rtt input. + node->RemoveInput(1); + NodeProperties::ChangeOp( + node, common()->TypeGuard(NodeProperties::GetType(node))); + return Changed(node); } else { gasm_.InitializeEffectControl(effect, control); return Replace( From 4c61bb3131b7951ed2ed896b4df6110b1e5c072f Mon Sep 17 00:00:00 2001 From: Matthias Liedtke Date: Tue, 20 Dec 2022 13:12:02 +0100 Subject: [PATCH 006/654] [wasm-gc] Interop: Do not throw on [[Get]] and prototype retrieval With this change property accesses on WasmObject and WasmArray return undefined instead of throwing. Furthermore retrieving the prototype of a WasmObject/WasmArray returns null. Bug: v8:13523 Change-Id: I12e3b9fb6ac9a7305f001a3b3835854068c3064f Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4110995 Reviewed-by: Toon Verwaest Commit-Queue: Matthias Liedtke Cr-Commit-Position: refs/heads/main@{#84957} --- src/builtins/builtins-object.cc | 4 --- src/builtins/promise-resolve.tq | 8 ----- src/json/json-stringifier.cc | 5 +++ src/objects/js-objects-inl.h | 6 ---- src/objects/objects.cc | 11 +----- .../mjsunit/wasm/gc-js-interop-collections.js | 6 ++-- .../wasm/gc-js-interop-global-constructors.js | 35 ++++++++++++++----- test/mjsunit/wasm/gc-js-interop-objects.js | 33 +++++++++-------- test/mjsunit/wasm/gc-js-interop-wasm.js | 2 +- test/mjsunit/wasm/gc-js-interop.js | 20 ++++++----- 10 files changed, 66 insertions(+), 64 deletions(-) diff --git a/src/builtins/builtins-object.cc b/src/builtins/builtins-object.cc index e6d26ef7c7..48bb627de0 100644 --- a/src/builtins/builtins-object.cc +++ b/src/builtins/builtins-object.cc @@ -148,11 +148,7 @@ Object ObjectLookupAccessor(Isolate* isolate, Handle object, } return ObjectLookupAccessor(isolate, prototype, key, 
component); } - case LookupIterator::WASM_OBJECT: - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kWasmObjectsAreOpaque)); - case LookupIterator::INTEGER_INDEXED_EXOTIC: case LookupIterator::DATA: return ReadOnlyRoots(isolate).undefined_value(); diff --git a/src/builtins/promise-resolve.tq b/src/builtins/promise-resolve.tq index 8a5a90bd7f..114b1e922b 100644 --- a/src/builtins/promise-resolve.tq +++ b/src/builtins/promise-resolve.tq @@ -161,14 +161,6 @@ ResolvePromise(implicit context: Context)( } goto Slow; } label Slow deferred { - // Skip "then" lookup for Wasm objects as they are opaque. - // TODO(v8:13523): Drop this special case after changing what [[Get]] - // does in general. - @if(V8_ENABLE_WEBASSEMBLY) - if (Is(resolution)) { - return FulfillPromise(promise, resolution); - } - // 9. Let then be Get(resolution, "then"). // 10. If then is an abrupt completion, then try { diff --git a/src/json/json-stringifier.cc b/src/json/json-stringifier.cc index 98ff273a2e..cd10e1fbc4 100644 --- a/src/json/json-stringifier.cc +++ b/src/json/json-stringifier.cc @@ -591,6 +591,11 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle object, JSRawJson::kRawJsonIndex), isolate_))); return SUCCESS; +#if V8_ENABLE_WEBASSEMBLY + case WASM_STRUCT_TYPE: + case WASM_ARRAY_TYPE: + return UNCHANGED; +#endif default: if (InstanceTypeChecker::IsString(instance_type)) { if (deferred_string_key) SerializeDeferredKey(comma, key); diff --git a/src/objects/js-objects-inl.h b/src/objects/js-objects-inl.h index b2ceeb2999..5a47a445d8 100644 --- a/src/objects/js-objects-inl.h +++ b/src/objects/js-objects-inl.h @@ -100,12 +100,6 @@ MaybeHandle JSReceiver::GetPrototype(Isolate* isolate, // We don't expect access checks to be needed on JSProxy objects. 
DCHECK(!receiver->IsAccessCheckNeeded() || receiver->IsJSObject()); - if (receiver->IsWasmObject()) { - THROW_NEW_ERROR(isolate, - NewTypeError(MessageTemplate::kWasmObjectsAreOpaque), - HeapObject); - } - PrototypeIterator iter(isolate, receiver, kStartAtReceiver, PrototypeIterator::END_AT_NON_HIDDEN); do { diff --git a/src/objects/objects.cc b/src/objects/objects.cc index 01d826c43b..a2c56b1caf 100644 --- a/src/objects/objects.cc +++ b/src/objects/objects.cc @@ -1168,9 +1168,7 @@ MaybeHandle Object::GetProperty(LookupIterator* it, return result; } case LookupIterator::WASM_OBJECT: - THROW_NEW_ERROR(it->isolate(), - NewTypeError(MessageTemplate::kWasmObjectsAreOpaque), - Object); + return it->isolate()->factory()->undefined_value(); case LookupIterator::INTERCEPTOR: { bool done; Handle result; @@ -5595,13 +5593,6 @@ MaybeHandle JSPromise::Resolve(Handle promise, // is intact, as that guards the lookup path for the "then" property // on JSPromise instances which have the (initial) %PromisePrototype%. then = isolate->promise_then(); - } else if (receiver->IsWasmObject()) { - // Special case: [[Get]] throws an exception for Wasm objects, but we want - // to be able to resolve promises with them, so pretend that we looked - // up "then" and it was undefined. - // TODO(v8:13523): Drop this special case after changing what [[Get]] - // does in general. - then = isolate->factory()->undefined_value(); } else { then = JSReceiver::GetProperty(isolate, receiver, isolate->factory()->then_string()); diff --git a/test/mjsunit/wasm/gc-js-interop-collections.js b/test/mjsunit/wasm/gc-js-interop-collections.js index 757691ab11..4bbf9aa248 100644 --- a/test/mjsunit/wasm/gc-js-interop-collections.js +++ b/test/mjsunit/wasm/gc-js-interop-collections.js @@ -10,7 +10,7 @@ let {struct, array} = CreateWasmObjects(); for (const wasm_obj of [struct, array]) { // Test Array. 
- testThrowsRepeated(() => Array.from(wasm_obj), TypeError); + repeated(() => assertEquals([], Array.from(wasm_obj))); repeated(() => assertFalse(Array.isArray(wasm_obj))); repeated(() => assertEquals([wasm_obj], Array.of(wasm_obj))); testThrowsRepeated(() => [1, 2].at(wasm_obj), TypeError); @@ -65,7 +65,7 @@ for (const wasm_obj of [struct, array]) { arr.unshift(wasm_obj); assertEquals([wasm_obj, 1, 2], arr); }); - testThrowsRepeated(() => Int8Array.from(wasm_obj), TypeError); + repeated(() => assertEquals(Int8Array.from([]), Int8Array.from(wasm_obj))); testThrowsRepeated(() => Int8Array.of(wasm_obj), TypeError); for (let ArrayType of [Int8Array, Int16Array, Int32Array, Uint8Array, Uint16Array, @@ -88,7 +88,7 @@ for (const wasm_obj of [struct, array]) { testThrowsRepeated(() => array.map(() => wasm_obj), TypeError); testThrowsRepeated(() => array.reduce(wasm_obj), TypeError); testThrowsRepeated(() => array.reduceRight(wasm_obj), TypeError); - testThrowsRepeated(() => array.set(wasm_obj), TypeError); + repeated(() => array.set(wasm_obj)); testThrowsRepeated(() => array.set([wasm_obj]), TypeError); testThrowsRepeated(() => array.slice(wasm_obj, 1), TypeError); testThrowsRepeated(() => array.some(wasm_obj), TypeError); diff --git a/test/mjsunit/wasm/gc-js-interop-global-constructors.js b/test/mjsunit/wasm/gc-js-interop-global-constructors.js index 0c11073480..45214197ef 100644 --- a/test/mjsunit/wasm/gc-js-interop-global-constructors.js +++ b/test/mjsunit/wasm/gc-js-interop-global-constructors.js @@ -56,18 +56,30 @@ for (const wasm_obj of [struct, array]) { repeated(() => assertSame(wasm_obj, new Array(wasm_obj)[0])); testThrowsRepeated(() => new ArrayBuffer(wasm_obj), TypeError); testThrowsRepeated(() => new BigInt(wasm_obj), TypeError); - testThrowsRepeated(() => new BigInt64Array(wasm_obj), TypeError); - testThrowsRepeated(() => new BigUint64Array(wasm_obj), TypeError); + repeated(() => assertEquals(new BigInt64Array(), + new BigInt64Array(wasm_obj))); + 
testThrowsRepeated(() => new BigInt64Array([wasm_obj]), TypeError); + repeated(() => assertEquals(new BigUint64Array(), + new BigUint64Array(wasm_obj))); + testThrowsRepeated(() => new BigUint64Array([wasm_obj]), TypeError); repeated(() => assertEquals(true, (new Boolean(wasm_obj)).valueOf())); testThrowsRepeated(() => new DataView(wasm_obj), TypeError); testThrowsRepeated(() => new Date(wasm_obj), TypeError); testThrowsRepeated(() => new Error(wasm_obj), TypeError); testThrowsRepeated(() => new EvalError(wasm_obj), TypeError); - testThrowsRepeated(() => new Float64Array(wasm_obj), TypeError); + repeated(() => assertEquals(new Float64Array(), + new Float64Array(wasm_obj))); + testThrowsRepeated(() => new Float64Array([wasm_obj]), TypeError); testThrowsRepeated(() => new Function(wasm_obj), TypeError); - testThrowsRepeated(() => new Int8Array(wasm_obj), TypeError); - testThrowsRepeated(() => new Int16Array(wasm_obj), TypeError); - testThrowsRepeated(() => new Int32Array(wasm_obj), TypeError); + repeated(() => assertEquals(new Int8Array(), + new Int8Array(wasm_obj))); + testThrowsRepeated(() => new Int8Array([wasm_obj]), TypeError); + repeated(() => assertEquals(new Int16Array(), + new Int16Array(wasm_obj))); + testThrowsRepeated(() => new Int16Array([wasm_obj]), TypeError); + repeated(() => assertEquals(new Int32Array(), + new Int32Array(wasm_obj))); + testThrowsRepeated(() => new Int32Array([wasm_obj]), TypeError); testThrowsRepeated(() => new Map(wasm_obj), TypeError); testThrowsRepeated(() => new Number(wasm_obj), TypeError); repeated(() => assertSame(wasm_obj, new Object(wasm_obj))); @@ -82,9 +94,14 @@ for (const wasm_obj of [struct, array]) { testThrowsRepeated(() => new Symbol(wasm_obj), TypeError); testThrowsRepeated(() => new SyntaxError(wasm_obj), TypeError); testThrowsRepeated(() => new TypeError(wasm_obj), TypeError); - testThrowsRepeated(() => new Uint8Array(wasm_obj), TypeError); - testThrowsRepeated(() => new Uint16Array(wasm_obj), TypeError); - 
testThrowsRepeated(() => new Uint32Array(wasm_obj), TypeError); + repeated(() => assertEquals(new Uint8Array(), + new Uint8Array(wasm_obj))); + testThrowsRepeated(() => new Uint8Array([wasm_obj]), TypeError); + repeated(() => assertEquals(new Uint16Array(), + new Uint16Array(wasm_obj))); + testThrowsRepeated(() => new Uint16Array([wasm_obj]), TypeError); + repeated(() => assertEquals(new Uint32Array(), + new Uint32Array(wasm_obj))); testThrowsRepeated(() => new URIError(wasm_obj), TypeError); testThrowsRepeated(() => new WeakMap(wasm_obj), TypeError); repeated(() => assertSame(wasm_obj, new WeakRef(wasm_obj).deref())); diff --git a/test/mjsunit/wasm/gc-js-interop-objects.js b/test/mjsunit/wasm/gc-js-interop-objects.js index 3ec837e19f..ffd2cd2da3 100644 --- a/test/mjsunit/wasm/gc-js-interop-objects.js +++ b/test/mjsunit/wasm/gc-js-interop-objects.js @@ -12,10 +12,10 @@ for (const wasm_obj of [struct, array]) { // Test Object. testThrowsRepeated(() => Object.freeze(wasm_obj), TypeError); testThrowsRepeated(() => Object.seal(wasm_obj), TypeError); - testThrowsRepeated( - () => Object.prototype.__lookupGetter__.call(wasm_obj, 'foo'), TypeError); - testThrowsRepeated( - () => Object.prototype.__lookupSetter__.call(wasm_obj, 'foo'), TypeError); + repeated(() => assertSame( + undefined, Object.prototype.__lookupGetter__.call(wasm_obj, 'foo'))); + repeated(() => assertSame( + undefined, Object.prototype.__lookupSetter__.call(wasm_obj, 'foo'))); testThrowsRepeated( () => Object.prototype.__defineGetter__.call(wasm_obj, 'foo', () => 42), TypeError); @@ -55,7 +55,7 @@ for (const wasm_obj of [struct, array]) { testThrowsRepeated( () => Object.defineProperty(wasm_obj, 'prop', {value: 1}), TypeError); testThrowsRepeated(() => Object.fromEntries(wasm_obj), TypeError); - testThrowsRepeated(() => Object.getPrototypeOf(wasm_obj), TypeError); + repeated(() => assertSame(null, Object.getPrototypeOf(wasm_obj))); repeated(() => assertFalse(Object.hasOwn(wasm_obj, 'test'))); 
testThrowsRepeated(() => Object.preventExtensions(wasm_obj), TypeError); testThrowsRepeated(() => Object.setPrototypeOf(wasm_obj, Object), TypeError); @@ -67,11 +67,11 @@ for (const wasm_obj of [struct, array]) { let obj = Object.create(wasm_obj); repeated(() => assertSame(wasm_obj, Object.getPrototypeOf(obj))); repeated(() => assertSame(wasm_obj, Reflect.getPrototypeOf(obj))); - testThrowsRepeated(() => obj.__proto__, TypeError); + repeated(() => assertSame(undefined, obj.__proto__)); testThrowsRepeated(() => obj.__proto__ = wasm_obj, TypeError); // Property access fails. - testThrowsRepeated(() => obj[0], TypeError); - testThrowsRepeated(() => obj.prop, TypeError); + repeated(() => assertSame(undefined, obj[0])); + repeated(() => assertSame(undefined, obj.prop)); testThrowsRepeated(() => obj.toString(), TypeError); // Most conversions fail as it will use .toString(), .valueOf(), ... testThrowsRepeated(() => `${obj}`, TypeError); @@ -97,11 +97,11 @@ for (const wasm_obj of [struct, array]) { () => assertEquals([wasm_obj, 1], Reflect.apply(fct, wasm_obj, [1]))); repeated( () => assertEquals([{}, wasm_obj], Reflect.apply(fct, {}, [wasm_obj]))); - testThrowsRepeated(() => Reflect.apply(fct, 1, wasm_obj), TypeError); + repeated(() => assertEquals([new Number(1), undefined], Reflect.apply(fct, 1, wasm_obj))); testThrowsRepeated(() => Reflect.apply(wasm_obj, null, []), TypeError); } testThrowsRepeated(() => Reflect.construct(wasm_obj, []), TypeError); - testThrowsRepeated(() => Reflect.construct(Object, wasm_obj), TypeError); + repeated(() => assertEquals({}, Reflect.construct(Object, wasm_obj))); testThrowsRepeated(() => Reflect.construct(Object, [], wasm_obj), TypeError); testThrowsRepeated( () => Reflect.defineProperty(wasm_obj, 'prop', {value: 1}), TypeError); @@ -124,8 +124,8 @@ for (const wasm_obj of [struct, array]) { }); testThrowsRepeated(() => Reflect.deleteProperty(wasm_obj, 'prop'), TypeError); testThrowsRepeated(() => Reflect.deleteProperty({}, wasm_obj), 
TypeError); - testThrowsRepeated(() => Reflect.get(wasm_obj, 'prop'), TypeError); - testThrowsRepeated(() => Reflect.getPrototypeOf(wasm_obj), TypeError); + repeated(() => assertSame(undefined, Reflect.get(wasm_obj, 'prop'))); + repeated(() => assertSame(null, Reflect.getPrototypeOf(wasm_obj))); repeated(() => assertFalse(Reflect.has(wasm_obj, 'prop'))); repeated(() => assertTrue(Reflect.has({wasm_obj}, 'wasm_obj'))); @@ -149,8 +149,11 @@ for (const wasm_obj of [struct, array]) { testThrowsRepeated(() => proxy.abc = 123, TypeError); } { - let proxy = new Proxy({}, wasm_obj); - testThrowsRepeated(() => proxy.abc, TypeError); + let underlyingObject = {}; + let proxy = new Proxy(underlyingObject, wasm_obj); + repeated(() => assertSame(undefined, proxy.abc)); + underlyingObject.abc = 123; + repeated(() => assertSame(123, proxy.abc)); } { const handler = { @@ -166,7 +169,7 @@ for (const wasm_obj of [struct, array]) { } { let proxy = Proxy.revocable({}, wasm_obj).proxy; - testThrowsRepeated(() => proxy.abc, TypeError); + repeated(() => assertSame(undefined, proxy.abc)); } // Ensure no statement re-assigned wasm_obj by accident. 
diff --git a/test/mjsunit/wasm/gc-js-interop-wasm.js b/test/mjsunit/wasm/gc-js-interop-wasm.js index f6c0e57941..82e365ba26 100644 --- a/test/mjsunit/wasm/gc-js-interop-wasm.js +++ b/test/mjsunit/wasm/gc-js-interop-wasm.js @@ -72,7 +72,7 @@ for (const wasm_obj of [struct, array]) { let tag = new WebAssembly.Tag({parameters: ['structref']}); testThrowsRepeated(() => new WebAssembly.Exception(wasm_obj), TypeError); - testThrowsRepeated(() => new WebAssembly.Exception(tag, wasm_obj), TypeError); + repeated(() => new WebAssembly.Exception(tag, wasm_obj)); repeated(() => new WebAssembly.Exception(tag, [wasm_obj])); let exception = new WebAssembly.Exception(tag, [wasm_obj]); testThrowsRepeated(() => exception.is(wasm_obj), TypeError); diff --git a/test/mjsunit/wasm/gc-js-interop.js b/test/mjsunit/wasm/gc-js-interop.js index 7b4ba33a21..bdbb720f83 100644 --- a/test/mjsunit/wasm/gc-js-interop.js +++ b/test/mjsunit/wasm/gc-js-interop.js @@ -8,15 +8,16 @@ d8.file.execute('test/mjsunit/wasm/gc-js-interop-helpers.js'); let {struct, array} = CreateWasmObjects(); for (const wasm_obj of [struct, array]) { - testThrowsRepeated(() => wasm_obj.foo, TypeError); + repeated(() => assertSame(undefined, wasm_obj.foo)); testThrowsRepeated(() => wasm_obj.foo = 42, TypeError); - testThrowsRepeated(() => wasm_obj[0], TypeError); + repeated(() => assertSame(undefined, wasm_obj[0])); testThrowsRepeated(() => wasm_obj[0] = undefined, TypeError); - testThrowsRepeated(() => wasm_obj.__proto__, TypeError); + repeated(() => assertSame(undefined, wasm_obj.__proto__)); + repeated(() => assertSame( + null, Object.prototype.__lookupGetter__("__proto__").call(wasm_obj))); testThrowsRepeated( () => Object.prototype.__proto__.call(wasm_obj), TypeError); testThrowsRepeated(() => wasm_obj.__proto__ = null, TypeError); - testThrowsRepeated(() => JSON.stringify(wasm_obj), TypeError); testThrowsRepeated(() => { for (let p in wasm_obj) { } @@ -50,7 +51,7 @@ for (const wasm_obj of [struct, array]) { 
testThrowsRepeated(() => `${wasm_obj}`, TypeError); testThrowsRepeated(() => wasm_obj`test`, TypeError); testThrowsRepeated(() => new wasm_obj, TypeError); - testThrowsRepeated(() => wasm_obj?.property, TypeError); + repeated(() => assertSame(undefined, wasm_obj?.property)); repeated(() => assertEquals(undefined, void wasm_obj)); testThrowsRepeated(() => 2 == wasm_obj, TypeError); @@ -69,7 +70,7 @@ for (const wasm_obj of [struct, array]) { testThrowsRepeated(() => { let [] = wasm_obj; }, TypeError); testThrowsRepeated(() => { let [a, b] = wasm_obj; }, TypeError); testThrowsRepeated(() => { let [...all] = wasm_obj; }, TypeError); - testThrowsRepeated(() => { let {a} = wasm_obj; }, TypeError); + repeated(() => { let {a} = wasm_obj; assertSame(undefined, a); }); repeated(() => { let {} = wasm_obj; }, TypeError); repeated(() => { let {...rest} = wasm_obj; @@ -124,7 +125,8 @@ for (const wasm_obj of [struct, array]) { repeated( () => assertEquals([new Number(1), wasm_obj], fct.apply(1, [wasm_obj]))); - testThrowsRepeated(() => fct.apply(1, wasm_obj), TypeError); + repeated( + () => assertEquals([new Number(1), undefined], fct.apply(1, wasm_obj))); repeated(() => assertEquals([wasm_obj, 1], fct.bind(wasm_obj)(1))); repeated(() => assertEquals([wasm_obj, 1], fct.call(wasm_obj, 1))); } @@ -224,10 +226,12 @@ for (const wasm_obj of [struct, array]) { testThrowsRepeated(() => JSON.parse(wasm_obj), TypeError); repeated(() => assertEquals({x: 1}, JSON.parse('{"x": 1}', wasm_obj))); - testThrowsRepeated(() => JSON.stringify(wasm_obj), TypeError); + repeated(() => assertEquals(undefined, JSON.stringify(wasm_obj))); repeated(() => assertEquals('{"x":1}', JSON.stringify({x: 1}, wasm_obj))); repeated( () => assertEquals('{"x":1}', JSON.stringify({x: 1}, null, wasm_obj))); + repeated( + () => assertEquals("{}", JSON.stringify({wasm_obj}))); // Yielding wasm objects from a generator function is valid. 
repeated(() => { From 5b3965374ebcbd69000f0de0d3037cfd8ea0fbf5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Inf=C3=BChr?= Date: Tue, 20 Dec 2022 15:24:54 +0100 Subject: [PATCH 007/654] [heap] Fix slot recording in shared DescriptorArrays MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The write barrier for DescriptorArray was skipping slot recording unless is_compacting_ was set. However, that flag only applies to local objects but not shared objects in client isolates. This CL fixes this by using the same logic as the regular write barrier for deciding whether to record slots or not. Bug: v8:13267 Change-Id: Ib7c5567615aa04cf79225fc7613a9db1d4ae0f0e Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4118866 Commit-Queue: Dominik Inführ Reviewed-by: Michael Lippautz Cr-Commit-Position: refs/heads/main@{#84958} --- src/heap/marking-barrier-inl.h | 16 +++++++++++++--- src/heap/marking-barrier.cc | 8 ++------ src/heap/marking-barrier.h | 2 ++ 3 files changed, 17 insertions(+), 9 deletions(-) diff --git a/src/heap/marking-barrier-inl.h b/src/heap/marking-barrier-inl.h index 3ac74fef8b..7a75d5e403 100644 --- a/src/heap/marking-barrier-inl.h +++ b/src/heap/marking-barrier-inl.h @@ -86,21 +86,31 @@ void MarkingBarrier::MarkValueLocal(HeapObject value) { template inline void MarkingBarrier::MarkRange(HeapObject host, TSlot start, TSlot end) { auto* isolate = heap_->isolate(); - const bool is_compacting = is_compacting_; + const bool record_slots = + IsCompacting(host) && + !MemoryChunk::FromHeapObject(host)->ShouldSkipEvacuationSlotRecording(); for (TSlot slot = start; slot < end; ++slot) { typename TSlot::TObject object = slot.Relaxed_Load(); HeapObject heap_object; // Mark both, weak and strong edges. 
if (object.GetHeapObject(isolate, &heap_object)) { MarkValue(host, heap_object); - if (is_compacting) { - DCHECK(is_major()); + if (record_slots) { major_collector_->RecordSlot(host, HeapObjectSlot(slot), heap_object); } } } } +bool MarkingBarrier::IsCompacting(HeapObject object) const { + if (is_compacting_) { + DCHECK(is_major()); + return true; + } + + return shared_heap_worklist_.has_value() && object.InSharedWritableHeap(); +} + bool MarkingBarrier::WhiteToGreyAndPush(HeapObject obj) { if (marking_state_.WhiteToGrey(obj)) { current_worklist_->Push(obj); diff --git a/src/heap/marking-barrier.cc b/src/heap/marking-barrier.cc index 328c1bbc93..26b9a3d847 100644 --- a/src/heap/marking-barrier.cc +++ b/src/heap/marking-barrier.cc @@ -43,12 +43,8 @@ void MarkingBarrier::Write(HeapObject host, HeapObjectSlot slot, DCHECK(MemoryChunk::FromHeapObject(host)->IsMarking()); MarkValue(host, value); - if (slot.address()) { - if (is_compacting_ || - (shared_heap_worklist_.has_value() && host.InSharedWritableHeap())) { - DCHECK_IMPLIES(is_compacting_, is_major()); - MarkCompactCollector::RecordSlot(host, slot, value); - } + if (slot.address() && IsCompacting(host)) { + MarkCompactCollector::RecordSlot(host, slot, value); } } diff --git a/src/heap/marking-barrier.h b/src/heap/marking-barrier.h index 29312da7a3..c5479500ff 100644 --- a/src/heap/marking-barrier.h +++ b/src/heap/marking-barrier.h @@ -73,6 +73,8 @@ class MarkingBarrier { template inline void MarkRange(HeapObject value, TSlot start, TSlot end); + inline bool IsCompacting(HeapObject object) const; + bool is_major() const { return marking_barrier_type_ == MarkingBarrierType::kMajor; } From 4a37729f24fae7ec889e1b49194922c5d4792684 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olivier=20Fl=C3=BCckiger?= Date: Tue, 20 Dec 2022 14:05:55 +0000 Subject: [PATCH 008/654] [static-roots] Ensure only initialized memory is dumped to r/o snapshot MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 
Eliminate 3 sources of uninitialized memory ending up in the memory snapshot. (1) padding of sequential strings (2) unused heap space (3) filler objects for alignment. Bug: v8:13466 Change-Id: I139a5ebdda32b454096137e0f3bee5a6696194be Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4110900 Reviewed-by: Toon Verwaest Commit-Queue: Toon Verwaest Auto-Submit: Olivier Flückiger Cr-Commit-Position: refs/heads/main@{#84959} --- src/heap/factory-base.cc | 3 +++ src/heap/heap.cc | 6 +++--- src/heap/heap.h | 5 ++++- src/heap/setup-heap-internal.cc | 13 +++++++++---- src/objects/string-inl.h | 20 ++++++++++++++++++++ src/objects/string.cc | 8 ++++++++ src/objects/string.h | 12 ++++++++++++ src/snapshot/read-only-serializer.cc | 14 ++++++++++---- src/snapshot/serializer.cc | 9 +-------- tools/gcmole/test-expectations.txt | 2 +- 10 files changed, 71 insertions(+), 21 deletions(-) diff --git a/src/heap/factory-base.cc b/src/heap/factory-base.cc index 7571181a69..4040c61fc1 100644 --- a/src/heap/factory-base.cc +++ b/src/heap/factory-base.cc @@ -677,6 +677,7 @@ MaybeHandle FactoryBase::NewRawStringWithMap( string.set_length(length); string.set_raw_hash_field(String::kEmptyHashField); DCHECK_EQ(size, string.Size()); + string.clear_padding(); return handle(string, isolate()); } @@ -1057,6 +1058,7 @@ FactoryBase::AllocateRawOneByteInternalizedString( answer.set_length(length); answer.set_raw_hash_field(raw_hash_field); DCHECK_EQ(size, answer.Size()); + answer.clear_padding(); return handle(answer, isolate()); } @@ -1078,6 +1080,7 @@ FactoryBase::AllocateRawTwoByteInternalizedString( answer.set_length(length); answer.set_raw_hash_field(raw_hash_field); DCHECK_EQ(size, answer.Size()); + answer.clear_padding(); return handle(answer, isolate()); } diff --git a/src/heap/heap.cc b/src/heap/heap.cc index 3e690485d3..3c172a09c0 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -3214,9 +3214,9 @@ void Heap::CreateFillerObjectAtSweeper(Address addr, int size) { 
ClearRecordedSlots::kNo, VerifyNoSlotsRecorded::kNo); } -void Heap::CreateFillerObjectAt(Address addr, int size) { - CreateFillerObjectAtRaw(addr, size, - ClearFreedMemoryMode::kDontClearFreedMemory, +void Heap::CreateFillerObjectAt(Address addr, int size, + ClearFreedMemoryMode clear_memory_mode) { + CreateFillerObjectAtRaw(addr, size, clear_memory_mode, ClearRecordedSlots::kNo, VerifyNoSlotsRecorded::kYes); } diff --git a/src/heap/heap.h b/src/heap/heap.h index 0c4734ecae..39e6efb080 100644 --- a/src/heap/heap.h +++ b/src/heap/heap.h @@ -521,7 +521,10 @@ class Heap { // Initialize a filler object to keep the ability to iterate over the heap // when introducing gaps within pages. This method will verify that no slots // are recorded in this free memory. - V8_EXPORT_PRIVATE void CreateFillerObjectAt(Address addr, int size); + V8_EXPORT_PRIVATE void CreateFillerObjectAt( + Address addr, int size, + ClearFreedMemoryMode clear_memory_mode = + ClearFreedMemoryMode::kDontClearFreedMemory); // Initialize a filler object at a specific address. 
Unlike // `CreateFillerObjectAt` this method will not perform slot verification since diff --git a/src/heap/setup-heap-internal.cc b/src/heap/setup-heap-internal.cc index 8f7c1e033b..5ce25f16ea 100644 --- a/src/heap/setup-heap-internal.cc +++ b/src/heap/setup-heap-internal.cc @@ -735,10 +735,15 @@ void Heap::CreateInitialReadOnlyObjects() { if (required == obj.Size()) return; CHECK_LT(obj.Size(), required); int filler_size = required - obj.Size(); - auto filler = factory->NewFillerObject(filler_size, - AllocationAlignment::kTaggedAligned, - AllocationType::kReadOnly); - CHECK_EQ(filler->address() + filler->Size(), obj.address() + required); + + HeapObject filler = + allocator()->AllocateRawWith( + filler_size, AllocationType::kReadOnly, AllocationOrigin::kRuntime, + AllocationAlignment::kTaggedAligned); + CreateFillerObjectAt(filler.address(), filler_size, + ClearFreedMemoryMode::kClearFreedMemory); + + CHECK_EQ(filler.address() + filler.Size(), obj.address() + required); #endif }; diff --git a/src/objects/string-inl.h b/src/objects/string-inl.h index c7ddd9f696..4ce55c276a 100644 --- a/src/objects/string-inl.h +++ b/src/objects/string-inl.h @@ -1476,6 +1476,26 @@ SubStringRange::iterator SubStringRange::end() { return SubStringRange::iterator(string_, first_ + length_, no_gc_); } +void SeqOneByteString::clear_padding() { + const int data_size = SeqString::kHeaderSize + length() * kOneByteSize; + const int padding_size = SizeFor(length()) - data_size; + DCHECK_EQ((DataAndPaddingSizes{data_size, padding_size}), + GetDataAndPaddingSizes()); + DCHECK_EQ(address() + data_size + padding_size, address() + Size()); + if (padding_size == 0) return; + memset(reinterpret_cast(address() + data_size), 0, padding_size); +} + +void SeqTwoByteString::clear_padding() { + const int data_size = SeqString::kHeaderSize + length() * base::kUC16Size; + const int padding_size = SizeFor(length()) - data_size; + DCHECK_EQ((DataAndPaddingSizes{data_size, padding_size}), + 
GetDataAndPaddingSizes()); + DCHECK_EQ(address() + data_size + padding_size, address() + Size()); + if (padding_size == 0) return; + memset(reinterpret_cast(address() + data_size), 0, padding_size); +} + // static bool String::IsInPlaceInternalizable(String string) { return IsInPlaceInternalizable(string.map().instance_type()); diff --git a/src/objects/string.cc b/src/objects/string.cc index d996b04345..bf59d1ccdf 100644 --- a/src/objects/string.cc +++ b/src/objects/string.cc @@ -1824,6 +1824,7 @@ Handle SeqString::Truncate(Isolate* isolate, Handle string, // We are storing the new length using release store after creating a filler // for the left-over space to avoid races with the sweeper thread. string->set_length(new_length, kReleaseStore); + string->clear_padding(); return string; } @@ -1849,6 +1850,13 @@ SeqString::DataAndPaddingSizes SeqTwoByteString::GetDataAndPaddingSizes() return DataAndPaddingSizes{data_size, padding_size}; } +void SeqString::clear_padding() { + if (IsSeqOneByteString()) { + return SeqOneByteString::cast(*this).clear_padding(); + } + return SeqTwoByteString::cast(*this).clear_padding(); +} + uint16_t ConsString::Get( int index, PtrComprCageBase cage_base, const SharedStringAccessGuardIfNeeded& access_guard) const { diff --git a/src/objects/string.h b/src/objects/string.h index 047e7501c0..1b3ea5a0a3 100644 --- a/src/objects/string.h +++ b/src/objects/string.h @@ -708,9 +708,15 @@ class SeqString : public TorqueGeneratedSeqString { struct DataAndPaddingSizes { const int data_size; const int padding_size; + bool operator==(const DataAndPaddingSizes& other) const { + return data_size == other.data_size && padding_size == other.padding_size; + } }; DataAndPaddingSizes GetDataAndPaddingSizes() const; + // Zero out the padding bytes of this string. 
+ void clear_padding(); + TQ_OBJECT_CONSTRUCTORS(SeqString) }; @@ -755,6 +761,9 @@ class SeqOneByteString DataAndPaddingSizes GetDataAndPaddingSizes() const; + // Zero out the padding bytes of this string. + inline void clear_padding(); + // Maximal memory usage for a single sequential one-byte string. static const int kMaxCharsSize = kMaxLength; static const int kMaxSize = OBJECT_POINTER_ALIGN(kMaxCharsSize + kHeaderSize); @@ -799,6 +808,9 @@ class SeqTwoByteString DataAndPaddingSizes GetDataAndPaddingSizes() const; + // Zero out the padding bytes of this string. + inline void clear_padding(); + // Maximal memory usage for a single sequential two-byte string. static const int kMaxCharsSize = kMaxLength * 2; static const int kMaxSize = OBJECT_POINTER_ALIGN(kMaxCharsSize + kHeaderSize); diff --git a/src/snapshot/read-only-serializer.cc b/src/snapshot/read-only-serializer.cc index 122c66d691..d4876a2046 100644 --- a/src/snapshot/read-only-serializer.cc +++ b/src/snapshot/read-only-serializer.cc @@ -95,10 +95,16 @@ void ReadOnlySerializer::FinalizeSerialization() { reinterpret_cast
(space->pages()[0])); sink_.PutInt(pos, "first page offset"); for (auto p : space->pages()) { - size_t page_size = p->area_size(); - sink_.PutInt(page_size, "page size"); + // Pages are shrunk, but memory at the end of the area is still + // uninitialized and we do not want to include it in the snapshot. + size_t page_content_bytes = p->HighWaterMark() - p->area_start(); + sink_.PutInt(page_content_bytes, "page content bytes"); +#ifdef MEMORY_SANITIZER + __msan_check_mem_is_initialized(reinterpret_cast(p->area_start()), + static_cast(page_content_bytes)); +#endif sink_.PutRaw(reinterpret_cast(p->area_start()), - static_cast(p->area_size()), "page"); + static_cast(page_content_bytes), "page"); } } else { // This comes right after serialization of the other snapshots, where we @@ -108,7 +114,6 @@ void ReadOnlySerializer::FinalizeSerialization() { VisitRootPointer(Root::kReadOnlyObjectCache, nullptr, FullObjectSlot(&undefined)); SerializeDeferredObjects(); - Pad(); #ifdef DEBUG // Check that every object on read-only heap is reachable (and was @@ -124,6 +129,7 @@ void ReadOnlySerializer::FinalizeSerialization() { } #endif // DEBUG } + Pad(); } bool ReadOnlySerializer::MustBeDeferred(HeapObject object) { diff --git a/src/snapshot/serializer.cc b/src/snapshot/serializer.cc index 3e459b5217..78b23a371c 100644 --- a/src/snapshot/serializer.cc +++ b/src/snapshot/serializer.cc @@ -1197,15 +1197,8 @@ void Serializer::ObjectSerializer::OutputRawData(Address up_to) { } #ifdef MEMORY_SANITIZER // Check that we do not serialize uninitialized memory. - int msan_bytes_to_output = bytes_to_output; - if (object_->IsSeqString()) { - // SeqStrings may have uninitialized padding bytes. These padding - // bytes are never read and serialized as 0s. 
- msan_bytes_to_output -= - SeqString::cast(*object_).GetDataAndPaddingSizes().padding_size; - } __msan_check_mem_is_initialized( - reinterpret_cast(object_start + base), msan_bytes_to_output); + reinterpret_cast(object_start + base), bytes_to_output); #endif // MEMORY_SANITIZER PtrComprCageBase cage_base(isolate_); if (object_->IsBytecodeArray(cage_base)) { diff --git a/tools/gcmole/test-expectations.txt b/tools/gcmole/test-expectations.txt index 8ef8616790..d057fa4b1e 100644 --- a/tools/gcmole/test-expectations.txt +++ b/tools/gcmole/test-expectations.txt @@ -4,7 +4,7 @@ tools/gcmole/gcmole-test.cc:30:10: warning: Possibly stale variable due to GCs. tools/gcmole/gcmole-test.cc:28:20: note: Call might cause unexpected GC. isolate->heap()->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting); ^ -./src/heap/heap.h:985:21: note: GC call here. +./src/heap/heap.h:988:21: note: GC call here. V8_EXPORT_PRIVATE bool CollectGarbage( ^ tools/gcmole/gcmole-test.cc:48:3: warning: Possible problem with evaluation order with interleaved GCs. 
From b831775786d576cd1f32655b768bc76c5029dc2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Inf=C3=BChr?= Date: Tue, 20 Dec 2022 15:28:37 +0100 Subject: [PATCH 009/654] [heap] Enable --shared-space by default MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bug: v8:13267 Change-Id: I63c520203975397534d9942e3d78704d2c73ff5f Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4105024 Commit-Queue: Dominik Inführ Reviewed-by: Michael Lippautz Cr-Commit-Position: refs/heads/main@{#84960} --- src/flags/flag-definitions.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/flags/flag-definitions.h b/src/flags/flag-definitions.h index 5ea43974f3..86d599180a 100644 --- a/src/flags/flag-definitions.h +++ b/src/flags/flag-definitions.h @@ -1271,7 +1271,7 @@ DEFINE_BOOL(separate_gc_phases, false, DEFINE_BOOL(global_gc_scheduling, true, "enable GC scheduling based on global memory") DEFINE_BOOL(gc_global, false, "always perform global GCs") -DEFINE_BOOL(shared_space, false, +DEFINE_BOOL(shared_space, true, "Implement shared heap as shared space on a main isolate.") // TODO(12950): The next two flags only have an effect if From 804be91ab914cdd4d53f5bd5999349e0f341b29f Mon Sep 17 00:00:00 2001 From: legendecas Date: Wed, 21 Dec 2022 00:36:24 +0800 Subject: [PATCH 010/654] [ShadowRealm] side-effect-free inspection on cross-realm exceptions The spec does not allow side effects on wrapping the exceptions crossing the realm boundaries. We need to provide an easy way to inspect the exception-thrown cross-realms according to the last TC39 meeting consensus. Related spec change: https://github.com/tc39/proposal-shadowrealm/pull/382. 
Bug: v8:11989 Change-Id: Ia78d94fd33cba689267aeacd028d662bd4a37fe9 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3618759 Commit-Queue: Chengzhong Wu (legendecas) Reviewed-by: Shu-yu Guo Cr-Commit-Position: refs/heads/main@{#84961} --- src/builtins/builtins-definitions.h | 3 +- src/builtins/builtins-shadow-realm-gen.cc | 25 ++++++++++------ src/builtins/builtins-shadow-realm.cc | 8 ++--- src/common/message-template.h | 7 +++-- src/objects/js-function.cc | 13 ++++---- src/runtime/runtime-shadow-realm.cc | 12 ++++++++ src/runtime/runtime.h | 3 +- test/mjsunit/harmony/shadowrealm-evaluate.js | 21 ++++++++++++- .../harmony/shadowrealm-importvalue.js | 12 ++++++-- .../shadowrealm-skip-3-throw-object.mjs | 2 ++ .../harmony/shadowrealm-wrapped-function.js | 23 ++++++++++++++ .../PrivateAccessorAccess.golden | 8 ++--- .../PrivateMethodAccess.golden | 4 +-- .../StaticPrivateMethodAccess.golden | 30 +++++++++---------- 14 files changed, 123 insertions(+), 48 deletions(-) create mode 100644 test/mjsunit/harmony/shadowrealm-skip-3-throw-object.mjs diff --git a/src/builtins/builtins-definitions.h b/src/builtins/builtins-definitions.h index e0da30ba8b..c8deb23d9f 100644 --- a/src/builtins/builtins-definitions.h +++ b/src/builtins/builtins-definitions.h @@ -896,7 +896,8 @@ namespace internal { kSpecifier, kExportName) \ TFJ(ShadowRealmImportValueFulfilled, kJSArgcReceiverSlots + 1, kReceiver, \ kExports) \ - TFJ(ShadowRealmImportValueRejected, kDontAdaptArgumentsSentinel) \ + TFJ(ShadowRealmImportValueRejected, kJSArgcReceiverSlots + 1, kReceiver, \ + kException) \ \ /* SharedArrayBuffer */ \ CPP(SharedArrayBufferPrototypeGetByteLength) \ diff --git a/src/builtins/builtins-shadow-realm-gen.cc b/src/builtins/builtins-shadow-realm-gen.cc index 11705c2b1f..850fec3949 100644 --- a/src/builtins/builtins-shadow-realm-gen.cc +++ b/src/builtins/builtins-shadow-realm-gen.cc @@ -38,6 +38,9 @@ class ShadowRealmBuiltinsAssembler : public CodeStubAssembler { TNode 
AllocateImportValueFulfilledFunction( TNode caller_context, TNode eval_context, TNode specifier, TNode export_name); + void ShadowRealmThrow(TNode context, + MessageTemplate fallback_message, + TNode exception); }; TNode ShadowRealmBuiltinsAssembler::AllocateJSWrappedFunction( @@ -97,6 +100,14 @@ void ShadowRealmBuiltinsAssembler::CheckAccessor(TNode array, GotoIfNot(IsAccessorInfo(CAST(value)), bailout); } +void ShadowRealmBuiltinsAssembler::ShadowRealmThrow( + TNode context, MessageTemplate fallback_message, + TNode exception) { + TNode template_index = SmiConstant(static_cast(fallback_message)); + CallRuntime(Runtime::kShadowRealmThrow, context, template_index, exception); + Unreachable(); +} + // https://tc39.es/proposal-shadowrealm/#sec-getwrappedvalue TF_BUILTIN(ShadowRealmGetWrappedValue, ShadowRealmBuiltinsAssembler) { auto context = Parameter(Descriptor::kContext); @@ -285,11 +296,8 @@ TF_BUILTIN(CallWrappedFunction, ShadowRealmBuiltinsAssembler) { // 11. Else, BIND(&call_exception); // 11a. Throw a TypeError exception. - // TODO(v8:11989): provide a non-observable inspection on the - // pending_exception to the newly created TypeError. - // https://github.com/tc39/proposal-shadowrealm/issues/353 - ThrowTypeError(context, MessageTemplate::kCallShadowRealmFunctionThrown, - var_exception.value()); + ShadowRealmThrow(context, MessageTemplate::kCallWrappedFunctionThrew, + var_exception.value()); BIND(&target_not_callable); // A wrapped value should not be non-callable. @@ -416,10 +424,9 @@ TF_BUILTIN(ShadowRealmImportValueFulfilled, ShadowRealmBuiltinsAssembler) { TF_BUILTIN(ShadowRealmImportValueRejected, ShadowRealmBuiltinsAssembler) { TNode context = Parameter(Descriptor::kContext); - // TODO(v8:11989): provide a non-observable inspection on the - // pending_exception to the newly created TypeError. 
- // https://github.com/tc39/proposal-shadowrealm/issues/353 - ThrowTypeError(context, MessageTemplate::kImportShadowRealmRejected); + TNode exception = Parameter(Descriptor::kException); + ShadowRealmThrow(context, MessageTemplate::kImportShadowRealmRejected, + exception); } } // namespace internal diff --git a/src/builtins/builtins-shadow-realm.cc b/src/builtins/builtins-shadow-realm.cc index a76fd120fb..03f1cceaca 100644 --- a/src/builtins/builtins-shadow-realm.cc +++ b/src/builtins/builtins-shadow-realm.cc @@ -202,11 +202,11 @@ BUILTIN(ShadowRealmPrototypeEvaluate) { *factory->NewError(isolate->syntax_error_function(), message)); } // 21. If result.[[Type]] is not normal, throw a TypeError exception. - // TODO(v8:11989): provide a non-observable inspection on the - // pending_exception to the newly created TypeError. - // https://github.com/tc39/proposal-shadowrealm/issues/353 + Handle string = + Object::NoSideEffectsToString(isolate, pending_exception); THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kCallShadowRealmFunctionThrown)); + isolate, + NewTypeError(MessageTemplate::kCallShadowRealmEvaluateThrew, string)); } // 22. Return ? GetWrappedValue(callerRealm, result.[[Value]]). 
Handle wrapped_result; diff --git a/src/common/message-template.h b/src/common/message-template.h index 84f8afbd10..29b821b66b 100644 --- a/src/common/message-template.h +++ b/src/common/message-template.h @@ -61,13 +61,14 @@ namespace internal { T(CalledNonCallable, "% is not a function") \ T(CalledOnNonObject, "% called on non-object") \ T(CalledOnNullOrUndefined, "% called on null or undefined") \ - T(CallShadowRealmFunctionThrown, "Called throwing ShadowRealm function") \ + T(CallShadowRealmEvaluateThrew, "ShadowRealm evaluate threw (%)") \ T(CallSiteExpectsFunction, \ "CallSite expects wasm object as first or function as second argument, " \ "got <%, %>") \ T(CallSiteMethod, "CallSite method % expects CallSite as receiver") \ T(CallSiteMethodUnsupportedInShadowRealm, \ "CallSite method % is unsupported inside ShadowRealms") \ + T(CallWrappedFunctionThrew, "WrappedFunction threw (%)") \ T(CannotBeShared, "% cannot be shared") \ T(CannotConvertToPrimitive, "Cannot convert object to primitive value") \ T(CannotPreventExt, "Cannot prevent extensions") \ @@ -75,7 +76,7 @@ namespace internal { T(CannotFreezeArrayBufferView, \ "Cannot freeze array buffer views with elements") \ T(CannotSeal, "Cannot seal") \ - T(CannotWrap, "Cannot wrap target callable") \ + T(CannotWrap, "Cannot wrap target callable (%)") \ T(CircularStructure, "Converting circular structure to JSON%") \ T(ConstructAbstractClass, "Abstract class % not directly constructable") \ T(ConstAssign, "Assignment to constant variable.") \ @@ -112,7 +113,7 @@ namespace internal { T(ImportOutsideModule, "Cannot use import statement outside a module") \ T(ImportMetaOutsideModule, "Cannot use 'import.meta' outside a module") \ T(ImportMissingSpecifier, "import() requires a specifier") \ - T(ImportShadowRealmRejected, "Cannot import in the ShadowRealm") \ + T(ImportShadowRealmRejected, "Cannot import in ShadowRealm (%)") \ T(IncompatibleMethodReceiver, "Method % called on incompatible receiver %") \ 
T(InstanceofNonobjectProto, \ "Function has non-object prototype '%' in instanceof check") \ diff --git a/src/objects/js-function.cc b/src/objects/js-function.cc index 89c481e701..8a1912eb13 100644 --- a/src/objects/js-function.cc +++ b/src/objects/js-function.cc @@ -474,18 +474,19 @@ MaybeHandle JSWrappedFunction::Create( // 8. If result is an Abrupt Completion, throw a TypeError exception. if (is_abrupt.IsNothing()) { DCHECK(isolate->has_pending_exception()); + Handle pending_exception = + Handle(isolate->pending_exception(), isolate); isolate->clear_pending_exception(); - // TODO(v8:11989): provide a non-observable inspection on the - // pending_exception to the newly created TypeError. - // https://github.com/tc39/proposal-shadowrealm/issues/353 // The TypeError thrown is created with creation Realm's TypeError // constructor instead of the executing Realm's. + Handle type_error_function = + Handle(creation_context->type_error_function(), isolate); + Handle string = + Object::NoSideEffectsToString(isolate, pending_exception); THROW_NEW_ERROR_RETURN_VALUE( isolate, - NewError(Handle(creation_context->type_error_function(), - isolate), - MessageTemplate::kCannotWrap), + NewError(type_error_function, MessageTemplate::kCannotWrap, string), {}); } DCHECK(is_abrupt.FromJust()); diff --git a/src/runtime/runtime-shadow-realm.cc b/src/runtime/runtime-shadow-realm.cc index 7db79cc409..9a4174563a 100644 --- a/src/runtime/runtime-shadow-realm.cc +++ b/src/runtime/runtime-shadow-realm.cc @@ -40,5 +40,17 @@ RUNTIME_FUNCTION(Runtime_ShadowRealmImportValue) { return *inner_capability; } +RUNTIME_FUNCTION(Runtime_ShadowRealmThrow) { + DCHECK_EQ(2, args.length()); + HandleScope scope(isolate); + int message_id_smi = args.smi_value_at(0); + Handle value = args.at(1); + + MessageTemplate message_id = MessageTemplateFromInt(message_id_smi); + + Handle string = Object::NoSideEffectsToString(isolate, value); + THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(message_id, string)); 
+} + } // namespace internal } // namespace v8 diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h index 90bfc7bb0a..f250123008 100644 --- a/src/runtime/runtime.h +++ b/src/runtime/runtime.h @@ -448,7 +448,8 @@ namespace internal { #define FOR_EACH_INTRINSIC_SHADOW_REALM(F, I) \ F(ShadowRealmWrappedFunctionCreate, 2, 1) \ - F(ShadowRealmImportValue, 1, 1) + F(ShadowRealmImportValue, 1, 1) \ + F(ShadowRealmThrow, 2, 1) #define FOR_EACH_INTRINSIC_STRINGS(F, I) \ F(FlattenString, 1, 1) \ diff --git a/test/mjsunit/harmony/shadowrealm-evaluate.js b/test/mjsunit/harmony/shadowrealm-evaluate.js index cabad58e7e..8288963428 100644 --- a/test/mjsunit/harmony/shadowrealm-evaluate.js +++ b/test/mjsunit/harmony/shadowrealm-evaluate.js @@ -59,4 +59,23 @@ assertThrows(() => shadowRealm.evaluate(` var revocable = Proxy.revocable(() => 1, {}); revocable.revoke(); revocable.proxy; -`), TypeError, "Cannot wrap target callable"); +`), TypeError, "Cannot wrap target callable (TypeError: Cannot perform 'getOwnPropertyDescriptor' on a proxy that has been revoked)"); + +// no-side-effects inspection on thrown error +assertThrows(() => shadowRealm.evaluate(` +throw new Error('foo'); +`), TypeError, "ShadowRealm evaluate threw (Error: foo)"); + +// no-side-effects inspection on thrown error +assertThrows(() => shadowRealm.evaluate(` +globalThis.messageAccessed = false; +const err = new Error('foo'); +Object.defineProperty(err, 'message', { + get: function() { + globalThis.messageAccessed = true; + return 'bar'; + }, +}); +throw err; +`), TypeError, "ShadowRealm evaluate threw (Error)"); +assertFalse(shadowRealm.evaluate('globalThis.messageAccessed')); diff --git a/test/mjsunit/harmony/shadowrealm-importvalue.js b/test/mjsunit/harmony/shadowrealm-importvalue.js index 656201e76a..54007c8190 100644 --- a/test/mjsunit/harmony/shadowrealm-importvalue.js +++ b/test/mjsunit/harmony/shadowrealm-importvalue.js @@ -52,14 +52,22 @@ globalThis.foobar = 'outer-scope'; const promise = 
shadowRealm.importValue('./shadowrealm-skip-not-found.mjs', 'foo'); // Promise is created in caller realm. assertInstanceof(promise, Promise); - assertThrowsAsync(promise, TypeError, 'Cannot import in the ShadowRealm'); + assertThrowsAsync(promise, TypeError, /Cannot import in ShadowRealm \(Error: .+shadowrealm-skip-not-found\.mjs\)/); } { const promise = shadowRealm.importValue('./shadowrealm-skip-2-throw.mjs', 'foo'); // Promise is created in caller realm. assertInstanceof(promise, Promise); - assertThrowsAsync(promise, TypeError, 'Cannot import in the ShadowRealm'); + assertThrowsAsync(promise, TypeError, 'Cannot import in ShadowRealm (Error: foobar)'); +} + +// no-side-effects inspection on thrown error +{ + const promise = shadowRealm.importValue('./shadowrealm-skip-3-throw-object.mjs', 'foo'); + // Promise is created in caller realm. + assertInstanceof(promise, Promise); + assertThrowsAsync(promise, TypeError, 'Cannot import in ShadowRealm ([object Object])'); } // Invalid args diff --git a/test/mjsunit/harmony/shadowrealm-skip-3-throw-object.mjs b/test/mjsunit/harmony/shadowrealm-skip-3-throw-object.mjs new file mode 100644 index 0000000000..cc6c3fdd73 --- /dev/null +++ b/test/mjsunit/harmony/shadowrealm-skip-3-throw-object.mjs @@ -0,0 +1,2 @@ +export const foo = 'bar'; +throw { message: 'foobar' }; diff --git a/test/mjsunit/harmony/shadowrealm-wrapped-function.js b/test/mjsunit/harmony/shadowrealm-wrapped-function.js index 5c39a71f5b..782167b318 100644 --- a/test/mjsunit/harmony/shadowrealm-wrapped-function.js +++ b/test/mjsunit/harmony/shadowrealm-wrapped-function.js @@ -28,3 +28,26 @@ var proxy = revocable.proxy; assertEquals(proxy(), 1); revocable.revoke(); assertThrows(() => proxy(), TypeError, "Cannot perform 'apply' on a proxy that has been revoked"); + +// no-side-effects inspection on thrown error +var wrapped = shadowRealm.evaluate(`() => { + throw new Error('foo'); +}`); +assertThrows(() => wrapped(), TypeError, "WrappedFunction threw (Error: 
foo)"); + +// no-side-effects inspection on thrown error +var wrapped = shadowRealm.evaluate(` +globalThis.messageAccessed = false; +() => { + const err = new Error('foo'); + Object.defineProperty(err, 'message', { + get: function() { + globalThis.messageAccessed = true; + return 'bar'; + }, + }); + throw err; +} +`); +assertThrows(() => wrapped(), TypeError, "WrappedFunction threw (Error)"); +assertFalse(shadowRealm.evaluate('globalThis.messageAccessed')); diff --git a/test/unittests/interpreter/bytecode_expectations/PrivateAccessorAccess.golden b/test/unittests/interpreter/bytecode_expectations/PrivateAccessorAccess.golden index 03ff2c1085..4e18513dbe 100644 --- a/test/unittests/interpreter/bytecode_expectations/PrivateAccessorAccess.golden +++ b/test/unittests/interpreter/bytecode_expectations/PrivateAccessorAccess.golden @@ -83,7 +83,7 @@ bytecodes: [ /* 48 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0), /* 53 S> */ B(LdaImmutableCurrentContextSlot), U8(3), /* 58 E> */ B(GetKeyedProperty), R(this), U8(2), - B(Wide), B(LdaSmi), I16(307), + B(Wide), B(LdaSmi), I16(308), B(Star2), B(LdaConstant), U8(0), B(Star3), @@ -115,7 +115,7 @@ bytecodes: [ /* 41 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0), /* 46 S> */ B(LdaImmutableCurrentContextSlot), U8(3), /* 51 E> */ B(GetKeyedProperty), R(this), U8(2), - B(Wide), B(LdaSmi), I16(306), + B(Wide), B(LdaSmi), I16(307), B(Star2), B(LdaConstant), U8(0), B(Star3), @@ -149,7 +149,7 @@ bytecodes: [ B(Star2), B(LdaImmutableCurrentContextSlot), U8(3), /* 58 E> */ B(GetKeyedProperty), R(this), U8(2), - B(Wide), B(LdaSmi), I16(307), + B(Wide), B(LdaSmi), I16(308), B(Star3), B(LdaConstant), U8(0), B(Star4), @@ -181,7 +181,7 @@ bytecodes: [ /* 41 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0), /* 46 S> */ B(LdaImmutableCurrentContextSlot), U8(3), /* 51 E> */ B(GetKeyedProperty), R(this), U8(2), - B(Wide), B(LdaSmi), I16(306), + B(Wide), B(LdaSmi), I16(307), B(Star2), B(LdaConstant), U8(0), B(Star3), diff --git 
a/test/unittests/interpreter/bytecode_expectations/PrivateMethodAccess.golden b/test/unittests/interpreter/bytecode_expectations/PrivateMethodAccess.golden index 91d5beae1b..2c602e0adc 100644 --- a/test/unittests/interpreter/bytecode_expectations/PrivateMethodAccess.golden +++ b/test/unittests/interpreter/bytecode_expectations/PrivateMethodAccess.golden @@ -58,7 +58,7 @@ bytecodes: [ B(Star2), B(LdaImmutableCurrentContextSlot), U8(3), /* 54 E> */ B(GetKeyedProperty), R(this), U8(2), - B(Wide), B(LdaSmi), I16(305), + B(Wide), B(LdaSmi), I16(306), B(Star3), B(LdaConstant), U8(0), B(Star4), @@ -91,7 +91,7 @@ bytecodes: [ /* 44 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0), /* 49 S> */ B(LdaImmutableCurrentContextSlot), U8(3), /* 54 E> */ B(GetKeyedProperty), R(this), U8(2), - B(Wide), B(LdaSmi), I16(305), + B(Wide), B(LdaSmi), I16(306), B(Star2), B(LdaConstant), U8(0), B(Star3), diff --git a/test/unittests/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden b/test/unittests/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden index d3d6c1751a..c61f6f6711 100644 --- a/test/unittests/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden +++ b/test/unittests/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden @@ -24,7 +24,7 @@ bytecodes: [ B(TestReferenceEqual), R(this), B(Mov), R(this), R(1), B(JumpIfTrue), U8(16), - B(Wide), B(LdaSmi), I16(299), + B(Wide), B(LdaSmi), I16(300), B(Star2), B(LdaConstant), U8(0), B(Star3), @@ -61,13 +61,13 @@ bytecodes: [ B(TestReferenceEqual), R(this), B(Mov), R(this), R(0), B(JumpIfTrue), U8(16), - B(Wide), B(LdaSmi), I16(299), + B(Wide), B(LdaSmi), I16(300), B(Star2), B(LdaConstant), U8(0), B(Star3), /* 61 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2), B(Throw), - B(Wide), B(LdaSmi), I16(305), + B(Wide), B(LdaSmi), I16(306), B(Star2), B(LdaConstant), U8(1), B(Star3), @@ -99,13 +99,13 @@ bytecodes: [ B(TestReferenceEqual), R(this), B(Mov), R(this), R(0), 
B(JumpIfTrue), U8(16), - B(Wide), B(LdaSmi), I16(299), + B(Wide), B(LdaSmi), I16(300), B(Star1), B(LdaConstant), U8(0), B(Star2), /* 61 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2), B(Throw), - B(Wide), B(LdaSmi), I16(305), + B(Wide), B(LdaSmi), I16(306), B(Star1), B(LdaConstant), U8(1), B(Star2), @@ -145,7 +145,7 @@ bytecodes: [ B(TestReferenceEqual), R(this), B(Mov), R(this), R(0), B(JumpIfTrue), U8(16), - B(Wide), B(LdaSmi), I16(299), + B(Wide), B(LdaSmi), I16(300), B(Star2), B(LdaConstant), U8(0), B(Star3), @@ -167,7 +167,7 @@ bytecodes: [ B(TestReferenceEqual), R(this), B(Mov), R(this), R(0), B(JumpIfTrue), U8(16), - B(Wide), B(LdaSmi), I16(299), + B(Wide), B(LdaSmi), I16(300), B(Star3), B(LdaConstant), U8(0), B(Star4), @@ -182,7 +182,7 @@ bytecodes: [ B(TestReferenceEqual), R(this), B(Mov), R(this), R(0), B(JumpIfTrue), U8(16), - B(Wide), B(LdaSmi), I16(299), + B(Wide), B(LdaSmi), I16(300), B(Star2), B(LdaConstant), U8(0), B(Star3), @@ -216,13 +216,13 @@ bytecodes: [ B(TestReferenceEqual), R(this), B(Mov), R(this), R(0), B(JumpIfTrue), U8(16), - B(Wide), B(LdaSmi), I16(299), + B(Wide), B(LdaSmi), I16(300), B(Star1), B(LdaConstant), U8(0), B(Star2), /* 65 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2), B(Throw), - B(Wide), B(LdaSmi), I16(307), + B(Wide), B(LdaSmi), I16(308), B(Star1), B(LdaConstant), U8(1), B(Star2), @@ -253,13 +253,13 @@ bytecodes: [ B(TestReferenceEqual), R(this), B(Mov), R(this), R(0), B(JumpIfTrue), U8(16), - B(Wide), B(LdaSmi), I16(299), + B(Wide), B(LdaSmi), I16(300), B(Star1), B(LdaConstant), U8(0), B(Star2), /* 58 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2), B(Throw), - B(Wide), B(LdaSmi), I16(306), + B(Wide), B(LdaSmi), I16(307), B(Star1), B(LdaConstant), U8(1), B(Star2), @@ -292,13 +292,13 @@ bytecodes: [ B(TestReferenceEqual), R(this), B(Mov), R(this), R(0), B(JumpIfTrue), U8(16), - B(Wide), B(LdaSmi), I16(299), + B(Wide), B(LdaSmi), I16(300), B(Star2), B(LdaConstant), U8(0), 
B(Star3), /* 65 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2), B(Throw), - B(Wide), B(LdaSmi), I16(307), + B(Wide), B(LdaSmi), I16(308), B(Star2), B(LdaConstant), U8(1), B(Star3), @@ -327,7 +327,7 @@ bytecode array length: 19 bytecodes: [ /* 46 S> */ B(LdaImmutableCurrentContextSlot), U8(3), /* 51 E> */ B(GetKeyedProperty), R(this), U8(0), - B(Wide), B(LdaSmi), I16(306), + B(Wide), B(LdaSmi), I16(307), B(Star1), B(LdaConstant), U8(0), B(Star2), From 5d1e0837a4ad206040f930b471916f2ac1f6e53b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Inf=C3=BChr?= Date: Tue, 20 Dec 2022 19:56:25 +0000 Subject: [PATCH 011/654] Revert "[heap] Enable --shared-space by default" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit b831775786d576cd1f32655b768bc76c5029dc2a. Reason for revert: Causes failures on bots. Original change's description: > [heap] Enable --shared-space by default > > Bug: v8:13267 > Change-Id: I63c520203975397534d9942e3d78704d2c73ff5f > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4105024 > Commit-Queue: Dominik Inführ > Reviewed-by: Michael Lippautz > Cr-Commit-Position: refs/heads/main@{#84960} Bug: v8:13267 Change-Id: I0a046aa4a1068228cd56247512de0934db8a504f No-Presubmit: true No-Tree-Checks: true No-Try: true Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4116859 Auto-Submit: Dominik Inführ Bot-Commit: Rubber Stamper Commit-Queue: Rubber Stamper Cr-Commit-Position: refs/heads/main@{#84962} --- src/flags/flag-definitions.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/flags/flag-definitions.h b/src/flags/flag-definitions.h index 86d599180a..5ea43974f3 100644 --- a/src/flags/flag-definitions.h +++ b/src/flags/flag-definitions.h @@ -1271,7 +1271,7 @@ DEFINE_BOOL(separate_gc_phases, false, DEFINE_BOOL(global_gc_scheduling, true, "enable GC scheduling based on global memory") DEFINE_BOOL(gc_global, false, "always perform 
global GCs") -DEFINE_BOOL(shared_space, true, +DEFINE_BOOL(shared_space, false, "Implement shared heap as shared space on a main isolate.") // TODO(12950): The next two flags only have an effect if From 2a71e8637f0ee70d11873abef7cda8c772f7a8ad Mon Sep 17 00:00:00 2001 From: Nikolaos Papaspyrou Date: Tue, 20 Dec 2022 15:53:22 +0100 Subject: [PATCH 012/654] Reland "[heap] Fix conservative stack scanning for client isolates" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a reland of commit 36bac1bcae1020fce5b9dfe54522d33df80b2dd6 Original change's description: > [heap] Fix conservative stack scanning for client isolates > > With this CL, the context of stacks corresponding to all client isolates > are saved, so that conservative stack scanning can be used correctly > during a shared garbage collection. This happens: > > 1) in Heap::PerformSharedGarbageCollection, for the stacks of the shared > isolate and the initiator; > 2) when an isolate's main thread is waiting in a safepoint; and > 3) when an isolate's main thread is parked. 
> > Bug: v8:13257 > Change-Id: I9ff060f2c0c1ec12977c70d67d65d9c543e2d165 > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4027210 > Reviewed-by: Michael Lippautz > Reviewed-by: Omer Katz > Commit-Queue: Nikolaos Papaspyrou > Reviewed-by: Dominik Inführ > Cr-Commit-Position: refs/heads/main@{#84712} Bug: v8:13257 Change-Id: I61df6eeca5a28e04eb3a858f7d601bc5f6312e49 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4092737 Reviewed-by: Dominik Inführ Reviewed-by: Omer Katz Commit-Queue: Nikolaos Papaspyrou Cr-Commit-Position: refs/heads/main@{#84963} --- src/heap/heap.cc | 47 ++- src/heap/heap.h | 12 +- src/heap/local-heap.cc | 23 +- src/heap/local-heap.h | 19 + .../cctest/heap/test-concurrent-allocation.cc | 2 +- test/cctest/heap/test-heap.cc | 23 +- .../conservative-stack-visitor-unittest.cc | 393 ++++++++++++++++++ 7 files changed, 482 insertions(+), 37 deletions(-) diff --git a/src/heap/heap.cc b/src/heap/heap.cc index 3c172a09c0..0852c8b8fa 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -2385,6 +2385,8 @@ void Heap::PerformSharedGarbageCollection(Isolate* initiator, DCHECK_NOT_NULL(isolate()->global_safepoint()); SaveStackContextScope stack_context_scope(&stack()); + SaveStackContextScope initiator_stack_context_scope( + &initiator->thread_local_top()->stack_); isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) { client->heap()->FreeSharedLinearAllocationAreas(); @@ -4621,7 +4623,8 @@ class ClearStaleLeftTrimmedHandlesVisitor : public RootVisitor { #endif // V8_COMPRESS_POINTERS }; -void Heap::IterateRoots(RootVisitor* v, base::EnumSet options) { +void Heap::IterateRoots(RootVisitor* v, base::EnumSet options, + IterateRootsMode mode) { v->VisitRootPointers(Root::kStrongRootList, nullptr, roots_table().strong_roots_begin(), roots_table().strong_roots_end()); @@ -4691,7 +4694,7 @@ void Heap::IterateRoots(RootVisitor* v, base::EnumSet options) { StackState stack_state = 
options.contains(SkipRoot::kConservativeStack) ? StackState::kNoHeapPointers : StackState::kMayContainHeapPointers; - IterateStackRoots(v, stack_state); + IterateStackRoots(v, stack_state, mode); v->Synchronize(VisitorSynchronization::kStackRoots); } @@ -4812,13 +4815,13 @@ void Heap::IterateRootsIncludingClients(RootVisitor* v, if (isolate()->is_shared_heap_isolate()) { ClientRootVisitor client_root_visitor(v); - // TODO(v8:13257): We cannot run CSS on client isolates now, as the - // stack markers will not be correct. - options.Add(SkipRoot::kConservativeStack); + // When iterating roots of clients, we assume it will be necessary to scan + // their stacks conservatively regardless of the main isolate's stack state. + options.Remove(SkipRoot::kConservativeStack); isolate()->global_safepoint()->IterateClientIsolates( [v = &client_root_visitor, options](Isolate* client) { if (client->is_shared_heap_isolate()) return; - client->heap()->IterateRoots(v, options); + client->heap()->IterateRoots(v, options, IterateRootsMode::kShared); }); } } @@ -4831,9 +4834,12 @@ void Heap::IterateRootsFromStackIncludingClients(RootVisitor* v, ClientRootVisitor client_root_visitor(v); isolate()->global_safepoint()->IterateClientIsolates( [v = &client_root_visitor](Isolate* client) { - // TODO(v8:13257): We cannot run CSS on client isolates now, as the - // stack markers will not be correct. - client->heap()->IterateStackRoots(v, StackState::kNoHeapPointers); + if (client->is_shared_heap_isolate()) return; + // When iterating stack roots of clients, we assume they may contain + // heap pointers regardless of the main isolate's stack state. 
+ client->heap()->IterateStackRoots(v, + StackState::kMayContainHeapPointers, + IterateRootsMode::kShared); }); } } @@ -4861,15 +4867,24 @@ void Heap::IterateBuiltins(RootVisitor* v) { static_assert(Builtins::AllBuiltinsAreIsolateIndependent()); } -void Heap::IterateStackRoots(RootVisitor* v, StackState stack_state) { - isolate_->Iterate(v); +void Heap::IterateStackRoots(RootVisitor* v, StackState stack_state, + IterateRootsMode mode) { + isolate()->Iterate(v); #ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING - if (stack_state == StackState::kMayContainHeapPointers && - !disable_conservative_stack_scanning_for_testing_) { - ConservativeStackVisitor stack_visitor(isolate(), v); - stack().IteratePointers(&stack_visitor); - } + if (stack_state == StackState::kNoHeapPointers || + disable_conservative_stack_scanning_for_testing_) + return; + + // In case of a shared GC, we're interested in the main isolate for CSS. + Isolate* main_isolate = mode == IterateRootsMode::kShared + ? isolate()->shared_heap_isolate() + : isolate(); + if (main_isolate->heap()->disable_conservative_stack_scanning_for_testing_) + return; + + ConservativeStackVisitor stack_visitor(main_isolate, v); + stack().IteratePointers(&stack_visitor); #endif // V8_ENABLE_CONSERVATIVE_STACK_SCANNING } diff --git a/src/heap/heap.h b/src/heap/heap.h index 39e6efb080..fe29659ce6 100644 --- a/src/heap/heap.h +++ b/src/heap/heap.h @@ -1039,8 +1039,13 @@ class Heap { // garbage collection and is usually only performed as part of // (de)serialization or heap verification. + // In the case of shared GC, kLocal is used for the main isolate and kShared + // for the (other) client isolates. + enum class IterateRootsMode { kLocal, kShared }; + // Iterates over the strong roots and the weak roots. 
- void IterateRoots(RootVisitor* v, base::EnumSet options); + void IterateRoots(RootVisitor* v, base::EnumSet options, + IterateRootsMode mode = IterateRootsMode::kLocal); void IterateRootsIncludingClients(RootVisitor* v, base::EnumSet options); void IterateRootsFromStackIncludingClients(RootVisitor* v, @@ -1053,7 +1058,8 @@ class Heap { void IterateWeakRoots(RootVisitor* v, base::EnumSet options); void IterateWeakGlobalHandles(RootVisitor* v); void IterateBuiltins(RootVisitor* v); - void IterateStackRoots(RootVisitor* v, StackState stack_state); + void IterateStackRoots(RootVisitor* v, StackState stack_state, + IterateRootsMode mode = IterateRootsMode::kLocal); // =========================================================================== // Remembered set API. ======================================================= @@ -2692,7 +2698,7 @@ class V8_NODISCARD DisableConservativeStackScanningScopeForTesting { heap_->disable_conservative_stack_scanning_for_testing_ = old_value_; } - protected: + private: Heap* heap_; bool old_value_; }; diff --git a/src/heap/local-heap.cc b/src/heap/local-heap.cc index ab357ef9e3..35b3b74c85 100644 --- a/src/heap/local-heap.cc +++ b/src/heap/local-heap.cc @@ -239,7 +239,9 @@ void LocalHeap::ParkSlowPath() { if (current_state.IsCollectionRequested()) { if (!heap()->ignore_local_gc_requests()) { + ClearStackContext(); heap_->CollectGarbageForBackground(this); + SaveStackContext(); continue; } @@ -294,7 +296,9 @@ void LocalHeap::UnparkSlowPath() { continue; if (!heap()->ignore_local_gc_requests()) { + ClearStackContext(); heap_->CollectGarbageForBackground(this); + SaveStackContext(); } return; @@ -365,14 +369,19 @@ void LocalHeap::SleepInSafepoint() { TRACE_GC1(heap_->tracer(), scope_id, thread_kind); - // Parking the running thread here is an optimization. We do not need to - // wake this thread up to reach the next safepoint. 
- ThreadState old_state = state_.SetParked(); - CHECK(old_state.IsRunning()); - CHECK(old_state.IsSafepointRequested()); - CHECK_IMPLIES(old_state.IsCollectionRequested(), is_main_thread()); + { + base::Optional stack_context_scope; + if (is_main_thread()) stack_context_scope.emplace(&heap_->stack()); - heap_->safepoint()->WaitInSafepoint(); + // Parking the running thread here is an optimization. We do not need to + // wake this thread up to reach the next safepoint. + ThreadState old_state = state_.SetParked(); + CHECK(old_state.IsRunning()); + CHECK(old_state.IsSafepointRequested()); + CHECK_IMPLIES(old_state.IsCollectionRequested(), is_main_thread()); + + heap_->safepoint()->WaitInSafepoint(); + } base::Optional ignore_gc_requests; if (is_main_thread()) ignore_gc_requests.emplace(heap()); diff --git a/src/heap/local-heap.h b/src/heap/local-heap.h index 744dc2edda..982a103434 100644 --- a/src/heap/local-heap.h +++ b/src/heap/local-heap.h @@ -282,6 +282,7 @@ class V8_EXPORT_PRIVATE LocalHeap { void Park() { DCHECK(AllowSafepoints::IsAllowed()); + SaveStackContextIfMainThread(); ThreadState expected = ThreadState::Running(); if (!state_.CompareExchangeWeak(expected, ThreadState::Parked())) { ParkSlowPath(); @@ -294,6 +295,7 @@ class V8_EXPORT_PRIVATE LocalHeap { if (!state_.CompareExchangeWeak(expected, ThreadState::Running())) { UnparkSlowPath(); } + ClearStackContextIfMainThread(); } void ParkSlowPath(); @@ -312,6 +314,21 @@ class V8_EXPORT_PRIVATE LocalHeap { void SetUp(); void SetUpSharedMarking(); + void SaveStackContext() { + DCHECK(!stack_context_scope_.has_value()); + stack_context_scope_.emplace(&heap_->stack()); + } + + void SaveStackContextIfMainThread() { + if (is_main_thread()) SaveStackContext(); + } + + void ClearStackContext() { stack_context_scope_.reset(); } + + void ClearStackContextIfMainThread() { + if (is_main_thread()) ClearStackContext(); + } + Heap* heap_; bool is_main_thread_; @@ -338,6 +355,8 @@ class V8_EXPORT_PRIVATE LocalHeap { 
MarkingBarrier* saved_marking_barrier_ = nullptr; + base::Optional stack_context_scope_; + friend class CollectionBarrier; friend class ConcurrentAllocator; friend class GlobalSafepoint; diff --git a/test/cctest/heap/test-concurrent-allocation.cc b/test/cctest/heap/test-concurrent-allocation.cc index 79ca12cc32..08f1aebc05 100644 --- a/test/cctest/heap/test-concurrent-allocation.cc +++ b/test/cctest/heap/test-concurrent-allocation.cc @@ -182,7 +182,7 @@ UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadParksAndUnparks) { threads.push_back(std::move(thread)); } - for (int i = 0; i < 300'000; i++) { + for (int i = 0; i < 30'000; i++) { ParkedScope scope(i_isolate->main_thread_local_isolate()); } diff --git a/test/cctest/heap/test-heap.cc b/test/cctest/heap/test-heap.cc index 77ff826998..df280cd61e 100644 --- a/test/cctest/heap/test-heap.cc +++ b/test/cctest/heap/test-heap.cc @@ -6870,17 +6870,20 @@ UNINITIALIZED_TEST(RestoreHeapLimit) { heap->AutomaticallyRestoreInitialHeapLimit(0.5); const int kFixedArrayLength = 1000000; { - HandleScope handle_scope(isolate); - while (!state.oom_triggered) { - factory->NewFixedArray(kFixedArrayLength); + DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap); + { + HandleScope handle_scope(isolate); + while (!state.oom_triggered) { + factory->NewFixedArray(kFixedArrayLength); + } } - } - heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true); - state.oom_triggered = false; - { - HandleScope handle_scope(isolate); - while (!state.oom_triggered) { - factory->NewFixedArray(kFixedArrayLength); + heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true); + state.oom_triggered = false; + { + HandleScope handle_scope(isolate); + while (!state.oom_triggered) { + factory->NewFixedArray(kFixedArrayLength); + } } } CHECK_EQ(state.current_heap_limit, state.initial_heap_limit); diff --git a/test/unittests/heap/conservative-stack-visitor-unittest.cc 
b/test/unittests/heap/conservative-stack-visitor-unittest.cc index f8cfc94162..b43f596d4c 100644 --- a/test/unittests/heap/conservative-stack-visitor-unittest.cc +++ b/test/unittests/heap/conservative-stack-visitor-unittest.cc @@ -4,6 +4,9 @@ #include "src/heap/conservative-stack-visitor.h" +#include "src/base/optional.h" +#include "src/base/platform/semaphore.h" +#include "src/heap/parked-scope.h" #include "test/unittests/heap/heap-utils.h" #include "test/unittests/test-utils.h" @@ -233,5 +236,395 @@ TEST_F(ConservativeStackVisitorTest, InnerHalfWord2) { #endif // V8_COMPRESS_POINTERS +#if V8_CAN_CREATE_SHARED_HEAP_BOOL + +using ConservativeStackScanningSharedTest = TestJSSharedMemoryWithIsolate; + +namespace { + +// An abstract class for threads that will be used in tests related to +// conservative stack scanning of the shared heap. When running, after +// initialization, it invokes the virtual method `RunTheTest`. The class +// provides basic functionality for allocating an object on the shared heap, +// synchronizing with the main thread (which triggers a GC), and verifying that +// the object has (or has not) been reclaimed by the GC. 
+class TestStackContextWithSharedHeapThread : public ParkingThread { + public: + TestStackContextWithSharedHeapThread(const char* name, StackState stack_state, + v8::base::Semaphore* sema_ready, + v8::base::Semaphore* sema_gc_complete) + + : ParkingThread(base::Thread::Options(name)), + stack_state_(stack_state), + sema_ready_(sema_ready), + sema_gc_complete_(sema_gc_complete) {} + + void Run() override { + IsolateWrapper isolate_wrapper(kNoCounters); + Isolate* i_client_isolate = + reinterpret_cast(isolate_wrapper.isolate()); + + base::Optional scope; + if (stack_state_ == StackState::kNoHeapPointers) + scope.emplace(i_client_isolate->heap()); + + RunTheTest(i_client_isolate); + } + + virtual void RunTheTest(Isolate* i_client_isolate) = 0; + + // Signal to the main thread to invoke a shared GC, then wait in a safepoint + // until the GC is finished. + V8_INLINE void SignalReadyAndWait(Isolate* i_client_isolate) { + sema_ready_->Signal(); + const auto timeout = base::TimeDelta::FromMilliseconds(100); + do { + i_client_isolate->main_thread_local_isolate()->heap()->Safepoint(); + } while (!sema_gc_complete_->WaitFor(timeout)); + } + + // Allocate an object on the shared heap and add a weak reference. + // Also, allocate some garbage. Return the address of the allocated object. + V8_INLINE Address AllocateObjectAndGarbage(Isolate* i_client_isolate, + Persistent& weak) { + HandleScope handle_scope(i_client_isolate); + Handle h = i_client_isolate->factory()->NewFixedArray( + 256, AllocationType::kSharedOld); + weak.Reset(reinterpret_cast(i_client_isolate), + Utils::FixedArrayToLocal(h)); + weak.SetWeak(); + + // Allocate some garbage on the shared heap. + for (int i = 0; i < 10; ++i) { + i_client_isolate->factory()->NewFixedArray(256, + AllocationType::kSharedOld); + } + + return h->GetHeapObject().address(); + } + + // Check whether an object has been reclaimed by GC. 
+ V8_INLINE void VerifyObject(const Persistent& weak) { + switch (stack_state_) { + case StackState::kNoHeapPointers: + EXPECT_TRUE(weak.IsEmpty()); + break; + case StackState::kMayContainHeapPointers: + EXPECT_FALSE(weak.IsEmpty()); + break; + } + } + + bool IsPreciseGC() const { + return stack_state_ == StackState::kNoHeapPointers; + } + + private: + StackState stack_state_; + v8::base::Semaphore* sema_ready_; + v8::base::Semaphore* sema_gc_complete_; +}; + +// Generic test template for conservative stack scanning of the shared heap. The +// `TestThread` must be a subclass of `TestStackContextWithSharedHeapThread`. +template +void StackContextWithSharedHeapTest(Isolate* isolate, StackState stack_state) { + v8::base::Semaphore sema_thread_ready(0); + v8::base::Semaphore sema_gc_complete(0); + + auto thread = std::make_unique(stack_state, &sema_thread_ready, + &sema_gc_complete); + CHECK(thread->Start()); + + // Wait for the thread to be ready. + sema_thread_ready.Wait(); + + // Invoke shared garbage collection. + isolate->heap()->CollectGarbageShared(isolate->main_thread_local_heap(), + GarbageCollectionReason::kTesting); + + // Signal that the GC has been complete. + sema_gc_complete.Signal(); + + ParkedScope scope(isolate->main_thread_local_isolate()); + thread->ParkedJoin(scope); +} + +// Test scenario #1: The thread just waits, so it is forced into a safepoint. +class TestWaitThread final : public TestStackContextWithSharedHeapThread { + public: + TestWaitThread(StackState stack_state, v8::base::Semaphore* sema_ready, + v8::base::Semaphore* sema_gc_complete) + : TestStackContextWithSharedHeapThread("TestWaitThread", stack_state, + sema_ready, sema_gc_complete) {} + + void RunTheTest(Isolate* i_client_isolate) override { + Persistent weak; + volatile Address ptr_on_stack = + AllocateObjectAndGarbage(i_client_isolate, weak); + + SignalReadyAndWait(i_client_isolate); + + // Make sure to keep the pointer alive. 
+ EXPECT_NE(static_cast(0), ptr_on_stack); + + VerifyObject(weak); + } +}; + +// Test scenario #2: The thread parks and waits. +class TestParkWaitThread final : public TestStackContextWithSharedHeapThread { + public: + TestParkWaitThread(StackState stack_state, v8::base::Semaphore* sema_ready, + v8::base::Semaphore* sema_gc_complete) + : TestStackContextWithSharedHeapThread("TestParkWaitThread", stack_state, + sema_ready, sema_gc_complete) {} + + void RunTheTest(Isolate* i_client_isolate) override { + Persistent weak; + volatile Address ptr_on_stack = + AllocateObjectAndGarbage(i_client_isolate, weak); + + ParkedScope parked_scope(i_client_isolate->main_thread_local_isolate()); + SignalReadyAndWait(i_client_isolate); + + // Make sure to keep the pointer alive. + EXPECT_NE(static_cast(0), ptr_on_stack); + + VerifyObject(weak); + } +}; + +// Test scenario #3: The thread parks, then unparks and waits, so it is forced +// into a safepoint. +class TestParkUnparkWaitThread final + : public TestStackContextWithSharedHeapThread { + public: + TestParkUnparkWaitThread(StackState stack_state, + v8::base::Semaphore* sema_ready, + v8::base::Semaphore* sema_gc_complete) + : TestStackContextWithSharedHeapThread("TestParkUnparkWaitThread", + stack_state, sema_ready, + sema_gc_complete) {} + + void RunTheTest(Isolate* i_client_isolate) override { + Persistent weak; + volatile Address ptr_on_stack = + AllocateObjectAndGarbage(i_client_isolate, weak); + + ParkedScope parked_scope(i_client_isolate->main_thread_local_isolate()); + + // Call KeepRunning, which is not inlined, to add a frame on the stack. + KeepRunning(i_client_isolate); + + // Make sure to keep the pointer alive. 
+ EXPECT_NE(static_cast(0), ptr_on_stack); + + VerifyObject(weak); + } + + V8_NOINLINE void KeepRunning(Isolate* i_client_isolate) { + UnparkedScope unparked_scope(i_client_isolate->main_thread_local_isolate()); + + Persistent weak; + volatile Address ptr_on_stack = + AllocateObjectAndGarbage(i_client_isolate, weak); + + SignalReadyAndWait(i_client_isolate); + + // Make sure to keep the pointer alive. + EXPECT_NE(static_cast(0), ptr_on_stack); + + VerifyObject(weak); + } +}; + +// Test scenario #4: The thread parks, then unparks, then parks again and waits. +class TestParkUnparkParkWaitThread final + : public TestStackContextWithSharedHeapThread { + public: + TestParkUnparkParkWaitThread(StackState stack_state, + v8::base::Semaphore* sema_ready, + v8::base::Semaphore* sema_gc_complete) + : TestStackContextWithSharedHeapThread("TestParkUnparkParkWaitThread", + stack_state, sema_ready, + sema_gc_complete) {} + + void RunTheTest(Isolate* i_client_isolate) override { + Persistent weak; + volatile Address ptr_on_stack = + AllocateObjectAndGarbage(i_client_isolate, weak); + + ParkedScope parked_scope(i_client_isolate->main_thread_local_isolate()); + + // Call KeepRunning, which is not inlined, to add a frame on the stack. + KeepRunning(i_client_isolate); + + // Make sure to keep the pointer alive. + EXPECT_NE(static_cast(0), ptr_on_stack); + + VerifyObject(weak); + } + + V8_NOINLINE void KeepRunning(Isolate* i_client_isolate) { + UnparkedScope unparked_scope(i_client_isolate->main_thread_local_isolate()); + + Persistent weak; + volatile Address ptr_on_stack = + AllocateObjectAndGarbage(i_client_isolate, weak); + + // Call KeepRunningStill, which is not inlined, to add one more frame on the + // stack. + KeepRunningStill(i_client_isolate); + + // Make sure to keep the pointer alive. 
+ EXPECT_NE(static_cast(0), ptr_on_stack); + + VerifyObject(weak); + } + + V8_NOINLINE void KeepRunningStill(Isolate* i_client_isolate) { + ParkedScope parked_scope(i_client_isolate->main_thread_local_isolate()); + SignalReadyAndWait(i_client_isolate); + } +}; + +// Test scenario #5: The thread parks, then unparks, parks again by unrolling +// the stack and waits. +class TestParkUnparkUnrollWaitThread final + : public TestStackContextWithSharedHeapThread { + public: + TestParkUnparkUnrollWaitThread(StackState stack_state, + v8::base::Semaphore* sema_ready, + v8::base::Semaphore* sema_gc_complete) + : TestStackContextWithSharedHeapThread("TestParkUnparkUnrollWaitThread", + stack_state, sema_ready, + sema_gc_complete) {} + + struct AllocationInfo { + Persistent* weak; + volatile Address* ptr = nullptr; + }; + + void RunTheTest(Isolate* i_client_isolate) override { + Persistent weak, weak0, weak1, weak2; + volatile Address ptr_on_stack = + AllocateObjectAndGarbage(i_client_isolate, weak); + + ParkedScope parked_scope(i_client_isolate->main_thread_local_isolate()); + + // Call KeepRunning, which is not inlined, to roll and then unroll the + // stack. + std::vector info = {{&weak0}, {&weak1}, {&weak2}}; + KeepRunning(i_client_isolate, info, 0); + + // Make sure to keep the pointer alive. + EXPECT_NE(static_cast(0), ptr_on_stack); + + VerifyObject(weak); + + // The object referenced by weak0 must be live with CSS, as it there was a + // pointer to it above the stack top. + DCHECK_LT(kPointerDepth0, kUnrollDepth); + VerifyObject(weak0); + + // The object referenced by weak1 may or may not be reclaimed with CSS, as + // there was a pointer to it above the last saved stacked context but below + // the stack top. It should always be reclaimed without CSS. 
+ DCHECK_LT(kUnrollDepth, kPointerDepth1); + DCHECK_LT(kPointerDepth1, kUnparkDepth); + if (IsPreciseGC()) VerifyObject(weak1); + + // The object referenced by weak2 must be always reclaimed (modulo false + // positives for CSS), as the pointer to it was below the last saved stack + // context. + DCHECK_LT(kUnparkDepth, kPointerDepth2); + EXPECT_TRUE(weak2.IsEmpty()); + } + + static constexpr int kPointerDepth0 = 17; + static constexpr int kUnrollDepth = 42; + static constexpr int kPointerDepth1 = 57; + static constexpr int kUnparkDepth = 71; + static constexpr int kPointerDepth2 = 87; + static constexpr int kAllocationDepth = 100; + + V8_NOINLINE void KeepRunning(Isolate* i_client_isolate, + std::vector& info, int depth) { + // At three different recursion depths, store pointers to objects that will + // be allocated later. + if (depth == kPointerDepth0) { + volatile Address ptr_on_stack; + info[0].ptr = &ptr_on_stack; + KeepRunning(i_client_isolate, info, depth + 1); + // Make sure to keep the pointer alive. + EXPECT_NE(static_cast(0), ptr_on_stack); + return; + } + if (depth == kPointerDepth1) { + volatile Address ptr_on_stack; + info[1].ptr = &ptr_on_stack; + KeepRunning(i_client_isolate, info, depth + 1); + // Make sure to keep the pointer alive. + EXPECT_NE(static_cast(0), ptr_on_stack); + return; + } + if (depth == kPointerDepth2) { + volatile Address ptr_on_stack; + info[2].ptr = &ptr_on_stack; + KeepRunning(i_client_isolate, info, depth + 1); + // Make sure to keep the pointer alive. + EXPECT_NE(static_cast(0), ptr_on_stack); + return; + } + // At this depth, wait for GC when unrolling the stack. + if (depth == kUnrollDepth) { + KeepRunning(i_client_isolate, info, depth + 1); + SignalReadyAndWait(i_client_isolate); + return; + } + // At this depth, unpark when rolling and park again when unrolling. 
+ if (depth == kUnparkDepth) { + UnparkedScope unparked_scope( + i_client_isolate->main_thread_local_isolate()); + KeepRunning(i_client_isolate, info, depth + 1); + return; + } + // Keep recursing until the end is reached. + if (depth < kAllocationDepth) { + KeepRunning(i_client_isolate, info, depth + 1); + return; + } + // The end of the recursion: allocate objects and store pointers at + // various recursion depths. + for (auto i : info) + *i.ptr = AllocateObjectAndGarbage(i_client_isolate, *i.weak); + } +}; + +} // namespace + +#define TEST_SCENARIO(name) \ + TEST_F(ConservativeStackScanningSharedTest, \ + StackContextWith##name##Precise) { \ + StackContextWithSharedHeapTest( \ + i_isolate(), StackState::kNoHeapPointers); \ + } \ + TEST_F(ConservativeStackScanningSharedTest, \ + StackContextWith##name##Conservative) { \ + StackContextWithSharedHeapTest( \ + i_isolate(), StackState::kMayContainHeapPointers); \ + } + +TEST_SCENARIO(Wait) +TEST_SCENARIO(ParkWait) +TEST_SCENARIO(ParkUnparkWait) +TEST_SCENARIO(ParkUnparkParkWait) +TEST_SCENARIO(ParkUnparkUnrollWait) + +#undef TEST_SCENARIO + +#endif // V8_CAN_CREATE_SHARED_HEAP_BOOL + } // namespace internal } // namespace v8 From f51e0bb1db67cfa1b4ac11b13e5cbee0b8601149 Mon Sep 17 00:00:00 2001 From: Nikolaos Papaspyrou Date: Tue, 20 Dec 2022 18:58:21 +0100 Subject: [PATCH 013/654] [heap] Merge mechanisms for disabling CSS EmbedderStackStateScope is used to disable conservative stack scanning for cppgc when the stack is known to not contain heap pointers. Also, DisableConservativeStackScanningScopeForTesting is used to disable CSS for the V8 heap in tests that assume a precise GC. Until now, these two have used two different mechanisms for disabling CSS. This CL merges the two mechanisms and implements the latter scope via the former. 
Bug: v8:13257 Change-Id: Ieca082657854fe2eff9eb5d95a30d48bb8eab44f Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4111954 Reviewed-by: Michael Lippautz Commit-Queue: Nikolaos Papaspyrou Cr-Commit-Position: refs/heads/main@{#84964} --- src/heap/heap.cc | 16 +++++----- src/heap/heap.h | 29 +++++++------------ src/heap/mark-compact.cc | 3 +- .../heap/cppgc-js/unified-heap-unittest.cc | 22 +++++++++----- .../heap/embedder-tracing-unittest.cc | 12 -------- 5 files changed, 34 insertions(+), 48 deletions(-) diff --git a/src/heap/heap.cc b/src/heap/heap.cc index 0852c8b8fa..e9d83704c4 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -4872,16 +4872,16 @@ void Heap::IterateStackRoots(RootVisitor* v, StackState stack_state, isolate()->Iterate(v); #ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING - if (stack_state == StackState::kNoHeapPointers || - disable_conservative_stack_scanning_for_testing_) - return; + if (stack_state == StackState::kNoHeapPointers || !IsGCWithStack()) return; // In case of a shared GC, we're interested in the main isolate for CSS. - Isolate* main_isolate = mode == IterateRootsMode::kShared - ? 
isolate()->shared_heap_isolate() - : isolate(); - if (main_isolate->heap()->disable_conservative_stack_scanning_for_testing_) - return; + Isolate* main_isolate; + if (mode == IterateRootsMode::kShared) { + main_isolate = isolate()->shared_heap_isolate(); + if (!main_isolate->heap()->IsGCWithStack()) return; + } else { + main_isolate = isolate(); + } ConservativeStackVisitor stack_visitor(main_isolate, v); stack().IteratePointers(&stack_visitor); diff --git a/src/heap/heap.h b/src/heap/heap.h index fe29659ce6..ff97369f80 100644 --- a/src/heap/heap.h +++ b/src/heap/heap.h @@ -2409,7 +2409,6 @@ class Heap { bool force_oom_ = false; bool force_gc_on_next_allocation_ = false; bool delay_sweeper_tasks_for_testing_ = false; - bool disable_conservative_stack_scanning_for_testing_ = false; UnorderedHeapObjectMap retainer_; UnorderedHeapObjectMap retaining_root_; @@ -2686,23 +2685,6 @@ class V8_EXPORT_PRIVATE V8_NODISCARD SaveStackContextScope { ::heap::base::Stack* stack_; }; -class V8_NODISCARD DisableConservativeStackScanningScopeForTesting { - public: - explicit inline DisableConservativeStackScanningScopeForTesting(Heap* heap) - : heap_(heap), - old_value_(heap_->disable_conservative_stack_scanning_for_testing_) { - heap_->disable_conservative_stack_scanning_for_testing_ = true; - } - - inline ~DisableConservativeStackScanningScopeForTesting() { - heap_->disable_conservative_stack_scanning_for_testing_ = old_value_; - } - - private: - Heap* heap_; - bool old_value_; -}; - // Space iterator for iterating over all the paged spaces of the heap: Map // space, old space and code space. Returns each space in turn, and null when it // is done. 
@@ -2858,6 +2840,17 @@ class V8_EXPORT_PRIVATE V8_NODISCARD EmbedderStackStateScope final { const StackState old_stack_state_; }; +class V8_NODISCARD DisableConservativeStackScanningScopeForTesting { + public: + explicit inline DisableConservativeStackScanningScopeForTesting(Heap* heap) + : embedder_scope_(EmbedderStackStateScope::ExplicitScopeForTesting( + heap->local_embedder_heap_tracer(), + cppgc::EmbedderStackState::kNoHeapPointers)) {} + + private: + EmbedderStackStateScope embedder_scope_; +}; + class V8_NODISCARD CppClassNamesAsHeapObjectNameScope final { public: explicit CppClassNamesAsHeapObjectNameScope(v8::CppHeap* heap); diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc index 49cbaea44f..7278980310 100644 --- a/src/heap/mark-compact.cc +++ b/src/heap/mark-compact.cc @@ -2115,8 +2115,7 @@ void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor) { // v8::TracedReference alive from the stack. This is only needed when using // `EmbedderHeapTracer` and not using `CppHeap`. auto& stack = heap()->stack(); - if (heap_->local_embedder_heap_tracer()->embedder_stack_state() == - cppgc::EmbedderStackState::kMayContainHeapPointers) { + if (heap_->IsGCWithStack()) { ConservativeTracedHandlesMarkingVisitor conservative_marker( *heap_, *local_marking_worklists_, cppgc::internal::CollectionType::kMajor); diff --git a/test/unittests/heap/cppgc-js/unified-heap-unittest.cc b/test/unittests/heap/cppgc-js/unified-heap-unittest.cc index 3439542e3b..3934eb8b00 100644 --- a/test/unittests/heap/cppgc-js/unified-heap-unittest.cc +++ b/test/unittests/heap/cppgc-js/unified-heap-unittest.cc @@ -517,12 +517,6 @@ V8_NOINLINE void StackToHeapTest(v8::Isolate* v8_isolate, Operation op, // Disable scanning, assuming the slots are overwritten. 
DisableConservativeStackScanningScopeForTesting no_stack_scanning( reinterpret_cast(v8_isolate)->heap()); - EmbedderStackStateScope scope = - EmbedderStackStateScope::ExplicitScopeForTesting( - reinterpret_cast(v8_isolate) - ->heap() - ->local_embedder_heap_tracer(), - cppgc::EmbedderStackState::kNoHeapPointers); FullGC(v8_isolate); } ASSERT_TRUE(observer.IsEmpty()); @@ -565,7 +559,13 @@ V8_NOINLINE void HeapToStackTest(v8::Isolate* v8_isolate, Operation op, FullGC(v8_isolate); EXPECT_FALSE(observer.IsEmpty()); stack_handle.Reset(); - FullGC(v8_isolate); + { + // Conservative scanning may find stale pointers to on-stack handles. + // Disable scanning, assuming the slots are overwritten. + DisableConservativeStackScanningScopeForTesting no_stack_scanning( + reinterpret_cast(v8_isolate)->heap()); + FullGC(v8_isolate); + } EXPECT_TRUE(observer.IsEmpty()); } @@ -603,7 +603,13 @@ V8_NOINLINE void StackToStackTest(v8::Isolate* v8_isolate, Operation op, FullGC(v8_isolate); EXPECT_FALSE(observer.IsEmpty()); stack_handle2.Reset(); - FullGC(v8_isolate); + { + // Conservative scanning may find stale pointers to on-stack handles. + // Disable scanning, assuming the slots are overwritten. + DisableConservativeStackScanningScopeForTesting no_stack_scanning( + reinterpret_cast(v8_isolate)->heap()); + FullGC(v8_isolate); + } EXPECT_TRUE(observer.IsEmpty()); } diff --git a/test/unittests/heap/embedder-tracing-unittest.cc b/test/unittests/heap/embedder-tracing-unittest.cc index d2a54aecc2..5342c97260 100644 --- a/test/unittests/heap/embedder-tracing-unittest.cc +++ b/test/unittests/heap/embedder-tracing-unittest.cc @@ -484,12 +484,6 @@ TEST_F(EmbedderTracingTest, TracedReferenceHandlesMarking) { // Disable scanning, assuming the slots are overwritten. 
DisableConservativeStackScanningScopeForTesting no_stack_scanning( i_isolate()->heap()); - EmbedderStackStateScope scope = - EmbedderStackStateScope::ExplicitScopeForTesting( - reinterpret_cast(v8_isolate()) - ->heap() - ->local_embedder_heap_tracer(), - EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers); FullGC(); } const size_t final_count = traced_handles->used_node_count(); @@ -590,12 +584,6 @@ TEST_F(EmbedderTracingTest, BasicTracedReference) { // Disable scanning, assuming the slots are overwritten. DisableConservativeStackScanningScopeForTesting no_stack_scanning( i_isolate()->heap()); - EmbedderStackStateScope scope = - EmbedderStackStateScope::ExplicitScopeForTesting( - reinterpret_cast(v8_isolate()) - ->heap() - ->local_embedder_heap_tracer(), - EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers); FullGC(); } EXPECT_EQ(initial_count, traced_handles->used_node_count()); From 70bdadce8f79e9ab12b9e8972803aea708fd36e7 Mon Sep 17 00:00:00 2001 From: Milad Fa Date: Tue, 20 Dec 2022 13:55:13 -0500 Subject: [PATCH 014/654] PPC/s390: [centry] Remove the unused SaveFPRegsMode parameter Port 605e46479aca3449a6ba1350a1de7927c76b86ad Original Commit Message: The SaveFPRegsMode::kSave specializations of CEntry were unused. Remove this parameter to eliminate dead code. 
R=jgruber@chromium.org, joransiu@ca.ibm.com, junyan@redhat.com, midawson@redhat.com BUG= LOG=N Change-Id: I114dbd0045e891085182e5af79ff1f1201b48765 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4116857 Commit-Queue: Milad Farazmand Reviewed-by: Vasili Skurydzin Cr-Commit-Position: refs/heads/main@{#84965} --- src/builtins/ppc/builtins-ppc.cc | 18 ++++++------ src/builtins/s390/builtins-s390.cc | 18 ++++++------ src/codegen/ppc/macro-assembler-ppc.cc | 35 ++++++------------------ src/codegen/ppc/macro-assembler-ppc.h | 28 ++++++------------- src/codegen/s390/macro-assembler-s390.cc | 35 ++++++------------------ src/codegen/s390/macro-assembler-s390.h | 28 ++++++------------- 6 files changed, 50 insertions(+), 112 deletions(-) diff --git a/src/builtins/ppc/builtins-ppc.cc b/src/builtins/ppc/builtins-ppc.cc index 1bdc9e816e..93e8b742f0 100644 --- a/src/builtins/ppc/builtins-ppc.cc +++ b/src/builtins/ppc/builtins-ppc.cc @@ -2949,8 +2949,7 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, - SaveFPRegsMode save_doubles, ArgvMode argv_mode, - bool builtin_exit_frame) { + ArgvMode argv_mode, bool builtin_exit_frame) { // Called from JavaScript; parameters are on stack as if calling JS function. // r3: number of arguments including receiver // r4: pointer to builtin function @@ -2986,9 +2985,9 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, arg_stack_space += result_size; } - __ EnterExitFrame( - save_doubles == SaveFPRegsMode::kSave, arg_stack_space, - builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); + __ EnterExitFrame(arg_stack_space, builtin_exit_frame + ? StackFrame::BUILTIN_EXIT + : StackFrame::EXIT); // Store a copy of argc in callee-saved registers for later. __ mr(r14, r3); @@ -3053,7 +3052,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, ? 
no_reg // r14: still holds argc (callee-saved). : r14; - __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc); + __ LeaveExitFrame(argc, false); __ blr(); // Handling of exception. @@ -3328,7 +3327,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, } else { __ mov(r14, Operand(stack_space)); } - __ LeaveExitFrame(false, r14, stack_space_operand != nullptr); + __ LeaveExitFrame(r14, stack_space_operand != nullptr); // Check if the function scheduled an exception. __ LoadRoot(r14, RootIndex::kTheHoleValue); @@ -3438,10 +3437,9 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // [1-3] FunctionCallbackInfo // [4] number of bytes to drop from the stack after returning static constexpr int kApiStackSpace = 5; - static constexpr bool kDontSaveDoubles = false; FrameScope frame_scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace); + __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT); // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above). // Arguments are after the return address (pushed by EnterExitFrame()). 
@@ -3557,7 +3555,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { } FrameScope frame_scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(false, apiStackSpace); + __ EnterExitFrame(apiStackSpace, StackFrame::EXIT); if (!ABI_PASSES_HANDLES_IN_REGS) { // pass 1st arg by reference diff --git a/src/builtins/s390/builtins-s390.cc b/src/builtins/s390/builtins-s390.cc index f1f9caad37..e1f968a4ee 100644 --- a/src/builtins/s390/builtins-s390.cc +++ b/src/builtins/s390/builtins-s390.cc @@ -2942,8 +2942,7 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, - SaveFPRegsMode save_doubles, ArgvMode argv_mode, - bool builtin_exit_frame) { + ArgvMode argv_mode, bool builtin_exit_frame) { // Called from JavaScript; parameters are on stack as if calling JS function. // r2: number of arguments including receiver // r3: pointer to builtin function @@ -2983,9 +2982,9 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, arg_stack_space += 2; #endif - __ EnterExitFrame( - save_doubles == SaveFPRegsMode::kSave, arg_stack_space, - builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); + __ EnterExitFrame(arg_stack_space, builtin_exit_frame + ? StackFrame::BUILTIN_EXIT + : StackFrame::EXIT); // Store a copy of argc, argv in callee-saved registers for later. __ mov(r6, r2); @@ -3052,7 +3051,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, ? no_reg // r6: still holds argc (callee-saved). : r6; - __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc); + __ LeaveExitFrame(argc, false); __ b(r14); // Handling of exception. 
@@ -3313,7 +3312,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, DCHECK_EQ(stack_space, 0); __ LoadU64(r6, *stack_space_operand); } - __ LeaveExitFrame(false, r6, stack_space_operand != nullptr); + __ LeaveExitFrame(r6, stack_space_operand != nullptr); // Check if the function scheduled an exception. __ Move(r7, ExternalReference::scheduled_exception_address(isolate)); @@ -3422,10 +3421,9 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // [1-3] FunctionCallbackInfo // [4] number of bytes to drop from the stack after returning static constexpr int kApiStackSpace = 5; - static constexpr bool kDontSaveDoubles = false; FrameScope frame_scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace); + __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT); // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above). // Arguments are after the return address (pushed by EnterExitFrame()). @@ -3540,7 +3538,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { } FrameScope frame_scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(false, apiStackSpace); + __ EnterExitFrame(apiStackSpace, StackFrame::EXIT); if (!ABI_PASSES_HANDLES_IN_REGS) { // pass 1st arg by reference diff --git a/src/codegen/ppc/macro-assembler-ppc.cc b/src/codegen/ppc/macro-assembler-ppc.cc index d399da4fb7..fdbcb2788b 100644 --- a/src/codegen/ppc/macro-assembler-ppc.cc +++ b/src/codegen/ppc/macro-assembler-ppc.cc @@ -1349,7 +1349,7 @@ int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) { // in the fp register (r31) // Then - we buy a new frame -void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, +void MacroAssembler::EnterExitFrame(int stack_space, StackFrame::Type frame_type) { DCHECK(frame_type == StackFrame::EXIT || frame_type == StackFrame::BUILTIN_EXIT); @@ -1385,15 +1385,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, 
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); StoreU64(cp, MemOperand(r8)); - // Optionally save all volatile double registers. - if (save_doubles) { - MultiPushDoubles(kCallerSavedDoubles); - // Note that d0 will be accessible at - // fp - ExitFrameConstants::kFrameSize - - // kNumCallerSavedDoubles * kDoubleSize, - // since the sp slot and code slot were pushed after the fp. - } - AddS64(sp, sp, Operand(-stack_space * kSystemPointerSize)); // Allocate and align the frame preparing for calling the runtime @@ -1431,18 +1422,9 @@ int TurboAssembler::ActivationFrameAlignment() { #endif } -void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, +void MacroAssembler::LeaveExitFrame(Register argument_count, bool argument_count_is_length) { ConstantPoolUnavailableScope constant_pool_unavailable(this); - // Optionally restore all double registers. - if (save_doubles) { - // Calculate the stack location of the saved doubles and restore them. - const int kNumRegs = kNumCallerSavedDoubles; - const int offset = - (ExitFrameConstants::kFixedFrameSizeFromFp + kNumRegs * kDoubleSize); - AddS64(r6, fp, Operand(-offset), r0); - MultiPopDoubles(kCallerSavedDoubles, r6); - } // Clear top frame. li(r6, Operand::Zero()); @@ -2168,8 +2150,8 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot( TailCallOptimizedCodeSlot(this, optimized_code_entry, r9); } -void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles) { +void MacroAssembler::CallRuntime(const Runtime::Function* f, + int num_arguments) { // All parameters are on the stack. r3 has the return value after call. 
// If the expected number of arguments of the runtime function is @@ -2184,10 +2166,9 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, mov(r3, Operand(num_arguments)); Move(r4, ExternalReference::Create(f)); #if V8_TARGET_ARCH_PPC64 - Handle code = - CodeFactory::CEntry(isolate(), f->result_size, save_doubles); + Handle code = CodeFactory::CEntry(isolate(), f->result_size); #else - Handle code = CodeFactory::CEntry(isolate(), 1, save_doubles); + Handle code = CodeFactory::CEntry(isolate(), 1); #endif Call(code, RelocInfo::CODE_TARGET); } @@ -2204,8 +2185,8 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) { void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin, bool builtin_exit_frame) { Move(r4, builtin); - Handle code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore, - ArgvMode::kStack, builtin_exit_frame); + Handle code = + CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame); Jump(code, RelocInfo::CODE_TARGET); } diff --git a/src/codegen/ppc/macro-assembler-ppc.h b/src/codegen/ppc/macro-assembler-ppc.h index bf278e86c1..67d0565f0b 100644 --- a/src/codegen/ppc/macro-assembler-ppc.h +++ b/src/codegen/ppc/macro-assembler-ppc.h @@ -1452,15 +1452,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // Enter exit frame. // stack_space - extra stack space, used for parameters before call to C. - // At least one slot (for the return address) should be provided. - void EnterExitFrame(bool save_doubles, int stack_space = 1, - StackFrame::Type frame_type = StackFrame::EXIT); + void EnterExitFrame(int stack_space, StackFrame::Type frame_type); // Leave the current exit frame. Expects the return value in r0. // Expect the number of values, pushed prior to the exit frame, to // remove in a register (or no_reg, if there is nothing to remove). 
- void LeaveExitFrame(bool save_doubles, Register argument_count, - bool argument_count_is_length = false); + void LeaveExitFrame(Register argument_count, bool argument_count_is_length); // Load the global proxy from the current context. void LoadGlobalProxy(Register dst) { @@ -1596,24 +1593,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { void CallJSEntry(Register target); // Call a runtime routine. - void CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore); - void CallRuntimeSaveDoubles(Runtime::FunctionId fid) { + void CallRuntime(const Runtime::Function* f, int num_arguments); + + // Convenience function: Same as above, but takes the fid instead. + void CallRuntime(Runtime::FunctionId fid) { const Runtime::Function* function = Runtime::FunctionForId(fid); - CallRuntime(function, function->nargs, SaveFPRegsMode::kSave); + CallRuntime(function, function->nargs); } // Convenience function: Same as above, but takes the fid instead. - void CallRuntime(Runtime::FunctionId fid, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { - const Runtime::Function* function = Runtime::FunctionForId(fid); - CallRuntime(function, function->nargs, save_doubles); - } - - // Convenience function: Same as above, but takes the fid instead. - void CallRuntime(Runtime::FunctionId fid, int num_arguments, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { - CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles); + void CallRuntime(Runtime::FunctionId fid, int num_arguments) { + CallRuntime(Runtime::FunctionForId(fid), num_arguments); } // Convenience function: tail call a runtime routine (jump). 
diff --git a/src/codegen/s390/macro-assembler-s390.cc b/src/codegen/s390/macro-assembler-s390.cc index 2fec07fbf4..c626cc2674 100644 --- a/src/codegen/s390/macro-assembler-s390.cc +++ b/src/codegen/s390/macro-assembler-s390.cc @@ -1542,7 +1542,7 @@ int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) { // gaps // Args // ABIRes <- newSP -void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, +void MacroAssembler::EnterExitFrame(int stack_space, StackFrame::Type frame_type) { DCHECK(frame_type == StackFrame::EXIT || frame_type == StackFrame::BUILTIN_EXIT); @@ -1574,15 +1574,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); StoreU64(cp, MemOperand(r1)); - // Optionally save all volatile double registers. - if (save_doubles) { - MultiPushDoubles(kCallerSavedDoubles); - // Note that d0 will be accessible at - // fp - ExitFrameConstants::kFrameSize - - // kNumCallerSavedDoubles * kDoubleSize, - // since the sp slot and code slot were pushed after the fp. - } - lay(sp, MemOperand(sp, -stack_space * kSystemPointerSize)); // Allocate and align the frame preparing for calling the runtime @@ -1617,17 +1608,8 @@ int TurboAssembler::ActivationFrameAlignment() { #endif } -void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, +void MacroAssembler::LeaveExitFrame(Register argument_count, bool argument_count_is_length) { - // Optionally restore all double registers. - if (save_doubles) { - // Calculate the stack location of the saved doubles and restore them. - const int kNumRegs = kNumCallerSavedDoubles; - lay(r5, MemOperand(fp, -(ExitFrameConstants::kFixedFrameSizeFromFp + - kNumRegs * kDoubleSize))); - MultiPopDoubles(kCallerSavedDoubles, r5); - } - // Clear top frame. 
Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate())); @@ -2163,8 +2145,8 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot( TailCallOptimizedCodeSlot(this, optimized_code_entry, r8); } -void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles) { +void MacroAssembler::CallRuntime(const Runtime::Function* f, + int num_arguments) { // All parameters are on the stack. r2 has the return value after call. // If the expected number of arguments of the runtime function is @@ -2179,10 +2161,9 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, mov(r2, Operand(num_arguments)); Move(r3, ExternalReference::Create(f)); #if V8_TARGET_ARCH_S390X - Handle code = - CodeFactory::CEntry(isolate(), f->result_size, save_doubles); + Handle code = CodeFactory::CEntry(isolate(), f->result_size); #else - Handle code = CodeFactory::CEntry(isolate(), 1, save_doubles); + Handle code = CodeFactory::CEntry(isolate(), 1); #endif Call(code, RelocInfo::CODE_TARGET); @@ -2200,8 +2181,8 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) { void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin, bool builtin_exit_frame) { Move(r3, builtin); - Handle code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore, - ArgvMode::kStack, builtin_exit_frame); + Handle code = + CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame); Jump(code, RelocInfo::CODE_TARGET); } diff --git a/src/codegen/s390/macro-assembler-s390.h b/src/codegen/s390/macro-assembler-s390.h index 6a91f1c096..4934755d95 100644 --- a/src/codegen/s390/macro-assembler-s390.h +++ b/src/codegen/s390/macro-assembler-s390.h @@ -1521,24 +1521,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { StoreU64(rec, MemOperand(sp, 0)); } - void CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles = 
SaveFPRegsMode::kIgnore); - void CallRuntimeSaveDoubles(Runtime::FunctionId fid) { + void CallRuntime(const Runtime::Function* f, int num_arguments); + + // Convenience function: Same as above, but takes the fid instead. + void CallRuntime(Runtime::FunctionId fid) { const Runtime::Function* function = Runtime::FunctionForId(fid); - CallRuntime(function, function->nargs, SaveFPRegsMode::kSave); + CallRuntime(function, function->nargs); } // Convenience function: Same as above, but takes the fid instead. - void CallRuntime(Runtime::FunctionId fid, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { - const Runtime::Function* function = Runtime::FunctionForId(fid); - CallRuntime(function, function->nargs, save_doubles); - } - - // Convenience function: Same as above, but takes the fid instead. - void CallRuntime(Runtime::FunctionId fid, int num_arguments, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { - CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles); + void CallRuntime(Runtime::FunctionId fid, int num_arguments) { + CallRuntime(Runtime::FunctionForId(fid), num_arguments); } // Convenience function: tail call a runtime routine (jump). @@ -1686,15 +1679,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // Enter exit frame. // stack_space - extra stack space, used for parameters before call to C. - // At least one slot (for the return address) should be provided. - void EnterExitFrame(bool save_doubles, int stack_space = 1, - StackFrame::Type frame_type = StackFrame::EXIT); + void EnterExitFrame(int stack_space, StackFrame::Type frame_type); // Leave the current exit frame. Expects the return value in r0. // Expect the number of values, pushed prior to the exit frame, to // remove in a register (or no_reg, if there is nothing to remove). 
- void LeaveExitFrame(bool save_doubles, Register argument_count, - bool argument_count_is_length = false); + void LeaveExitFrame(Register argument_count, bool argument_count_is_length); // Load the global proxy from the current context. void LoadGlobalProxy(Register dst) { From ab957d40bdc772e24f084081b9206967d609a27d Mon Sep 17 00:00:00 2001 From: Victor Gomes Date: Wed, 21 Dec 2022 09:16:59 +0100 Subject: [PATCH 015/654] [maglev] Fix clobbering input in Switch TurboAssembler::Switch can clobber the input register depending on the case value. Bug: v8:7700 Change-Id: I5064826c8616389237c6c8b5fcd1b4e73e27ea29 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4110853 Auto-Submit: Victor Gomes Reviewed-by: Toon Verwaest Commit-Queue: Toon Verwaest Cr-Commit-Position: refs/heads/main@{#84966} --- src/maglev/x64/maglev-ir-x64.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index 1d00b348b3..b0a08bf6b0 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -2965,7 +2965,7 @@ void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { __ Ret(); } -void Switch::SetValueLocationConstraints() { UseRegister(value()); } +void Switch::SetValueLocationConstraints() { UseAndClobberRegister(value()); } void Switch::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { std::unique_ptr labels = std::make_unique(size()); for (int i = 0; i < size(); i++) { From 72f041654d2af6751e980afe40e1d7ada4b788de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Inf=C3=BChr?= Date: Wed, 21 Dec 2022 09:43:41 +0100 Subject: [PATCH 016/654] [objects] Use global safepoint in OptimizedCodeIterator if needed MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit MakeHeapIterable() in a shared heap isolate requires a global safepoint. 
OptimizedCodeIterator uses MakeHeapIterable() but always entered a local safepoint. This CL changes OptimizedCodeIterator to request a global safepoint for shared heap isolates. Bug: v8:13267 Change-Id: I642a7731f4294cc1558de92d4daf5a1e3603f6cc Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4110854 Commit-Queue: Nikolaos Papaspyrou Auto-Submit: Dominik Inführ Commit-Queue: Dominik Inführ Reviewed-by: Nikolaos Papaspyrou Cr-Commit-Position: refs/heads/main@{#84967} --- src/objects/code.cc | 6 ++++-- src/objects/code.h | 4 ++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/objects/code.cc b/src/objects/code.cc index 50f995fae9..bcc955e0f1 100644 --- a/src/objects/code.cc +++ b/src/objects/code.cc @@ -420,8 +420,10 @@ bool Code::Inlines(SharedFunctionInfo sfi) { Code::OptimizedCodeIterator::OptimizedCodeIterator(Isolate* isolate) : isolate_(isolate), - safepoint_scope_( - std::make_unique(isolate->heap())), + safepoint_scope_(std::make_unique( + isolate, isolate->is_shared_heap_isolate() + ? 
SafepointKind::kGlobal + : SafepointKind::kIsolate)), object_iterator_( isolate->heap()->code_space()->GetObjectIterator(isolate->heap())), state_(kIteratingCodeSpace) {} diff --git a/src/objects/code.h b/src/objects/code.h index 078ba9c334..2f5ff896b7 100644 --- a/src/objects/code.h +++ b/src/objects/code.h @@ -29,7 +29,7 @@ class BytecodeArray; class CodeDataContainer; class CodeDesc; class ObjectIterator; -class IsolateSafepointScope; +class SafepointScope; class LocalFactory; template @@ -993,7 +993,7 @@ class Code::OptimizedCodeIterator { private: Isolate* isolate_; - std::unique_ptr safepoint_scope_; + std::unique_ptr safepoint_scope_; std::unique_ptr object_iterator_; enum { kIteratingCodeSpace, kIteratingCodeLOSpace, kDone } state_; From f7547a135aedab92a2ac8a59e0f4b822228c78f0 Mon Sep 17 00:00:00 2001 From: Toon Verwaest Date: Wed, 21 Dec 2022 10:50:52 +0100 Subject: [PATCH 017/654] Reland "[maglev] Enable --maglev with --future" Bug: v8:7700 Change-Id: Ic14006b4f3bed1f48cb910b1d1eb2d45fc207d8d Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4118771 Commit-Queue: Leszek Swirski Commit-Queue: Toon Verwaest Auto-Submit: Toon Verwaest Reviewed-by: Leszek Swirski Cr-Commit-Position: refs/heads/main@{#84968} --- src/flags/flag-definitions.h | 1 + test/mjsunit/interrupt-budget-override.js | 2 +- test/mjsunit/never-optimize.js | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/flags/flag-definitions.h b/src/flags/flag-definitions.h index 5ea43974f3..a74006935b 100644 --- a/src/flags/flag-definitions.h +++ b/src/flags/flag-definitions.h @@ -455,6 +455,7 @@ DEFINE_BOOL(lower_tier_as_toptier, false, #ifdef V8_ENABLE_MAGLEV #define V8_ENABLE_MAGLEV_BOOL true DEFINE_BOOL(maglev, false, "enable the maglev optimizing compiler") +DEFINE_WEAK_IMPLICATION(future, maglev) DEFINE_BOOL(maglev_inlining, false, "enable inlining in the maglev optimizing compiler") DEFINE_BOOL(maglev_reuse_stack_slots, true, diff --git 
a/test/mjsunit/interrupt-budget-override.js b/test/mjsunit/interrupt-budget-override.js index 9f3784e793..fe0a874de0 100644 --- a/test/mjsunit/interrupt-budget-override.js +++ b/test/mjsunit/interrupt-budget-override.js @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -// Flags: --turbofan --interrupt-budget=100 --interrupt-budget-for-feedback-allocation=10 --allow-natives-syntax +// Flags: --turbofan --interrupt-budget=100 --interrupt-budget-for-feedback-allocation=10 --allow-natives-syntax --nomaglev function f() { let s = 0; diff --git a/test/mjsunit/never-optimize.js b/test/mjsunit/never-optimize.js index 8007eb5c2a..5294edd9bc 100644 --- a/test/mjsunit/never-optimize.js +++ b/test/mjsunit/never-optimize.js @@ -26,7 +26,7 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Flags: --allow-natives-syntax --turbofan --no-always-turbofan --no-use-osr -// Flags: --interrupt-budget=1024 +// Flags: --interrupt-budget=1024 --nomaglev function o1() { } %PrepareFunctionForOptimization(o1); From baf110644c2aff9a7e2fba579c95774cecebce4e Mon Sep 17 00:00:00 2001 From: pthier Date: Tue, 20 Dec 2022 15:12:59 +0100 Subject: [PATCH 018/654] [maglev][arm64] Fix scratch register usage in deferred code Usage of scratch registers acquired via scratch register scope in deferred code is not valid, as the scope is not active within deferred code. Use proper temporaries (assigned by register allocation) instead. 
Bug: v8:7700 Change-Id: If4ef4c09ea6f5a551f44bda15b8eeb1055d07070 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4111170 Reviewed-by: Victor Gomes Commit-Queue: Patrick Thier Cr-Commit-Position: refs/heads/main@{#84969} --- src/maglev/arm64/maglev-ir-arm64.cc | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index 07e0914c9e..df023d184b 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -161,8 +161,8 @@ void BuiltinStringFromCharCode::SetValueLocationConstraints() { UseAny(code_input()); } else { UseRegister(code_input()); - set_temporaries_needed(1); } + set_temporaries_needed(1); DefineAsRegister(this); } void BuiltinStringFromCharCode::GenerateCode(MaglevAssembler* masm, @@ -350,6 +350,7 @@ void CheckedTruncateFloat64ToUint32::GenerateCode( void CheckedTruncateNumberToInt32::SetValueLocationConstraints() { UseRegister(input()); DefineAsRegister(this); + set_double_temporaries_needed(1); } void CheckedTruncateNumberToInt32::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { @@ -368,7 +369,7 @@ void CheckedTruncateNumberToInt32::GenerateCode(MaglevAssembler* masm, __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); __ CompareRoot(scratch, RootIndex::kHeapNumberMap); __ EmitEagerDeoptIf(ne, DeoptimizeReason::kNotANumber, this); - DoubleRegister double_value = temps.AcquireD(); + DoubleRegister double_value = double_temporaries().PopFirst(); __ Ldr(double_value, FieldMemOperand(value, HeapNumber::kValueOffset)); __ TruncateDoubleToInt32(result_reg, double_value); __ bind(&done); @@ -462,6 +463,7 @@ int CheckedObjectToIndex::MaxCallStackArgs() const { return 0; } void CheckedObjectToIndex::SetValueLocationConstraints() { UseRegister(object_input()); DefineAsRegister(this); + set_double_temporaries_needed(1); } void CheckedObjectToIndex::GenerateCode(MaglevAssembler* masm, const 
ProcessingState& state) { @@ -489,7 +491,7 @@ void CheckedObjectToIndex::GenerateCode(MaglevAssembler* masm, // Heap Number. { - DoubleRegister number_value = temps.AcquireD(); + DoubleRegister number_value = node->double_temporaries().first(); DoubleRegister converted_back = temps.AcquireD(); // Convert the input float64 value to int32. __ TruncateDoubleToInt32(result_reg, number_value); From 8623fd473b8742eec2d6780db72b982130b55da4 Mon Sep 17 00:00:00 2001 From: Leszek Swirski Date: Wed, 21 Dec 2022 11:23:01 +0100 Subject: [PATCH 019/654] [maglev] Destroy jobs on BG thread Maglev job destruction is suprisingly expensive, taking up roughly a third of total finalization time. Rather than destroying jobs as part of finalization, re-post them to the concurrent dispatcher to be destroyed in the background Bug: v8:7700 Change-Id: I450d8a7b49737504c2aaebbfa7754e0ae15e7f05 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4111238 Reviewed-by: Victor Gomes Auto-Submit: Leszek Swirski Commit-Queue: Leszek Swirski Cr-Commit-Position: refs/heads/main@{#84970} --- src/maglev/maglev-concurrent-dispatcher.cc | 20 ++++++++++++++++++-- src/maglev/maglev-concurrent-dispatcher.h | 1 + 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/src/maglev/maglev-concurrent-dispatcher.cc b/src/maglev/maglev-concurrent-dispatcher.cc index a098505e4d..aa5acf7a99 100644 --- a/src/maglev/maglev-concurrent-dispatcher.cc +++ b/src/maglev/maglev-concurrent-dispatcher.cc @@ -158,6 +158,7 @@ class MaglevConcurrentDispatcher::JobTask final : public v8::JobTask { LocalIsolate local_isolate(isolate(), ThreadKind::kBackground); DCHECK(local_isolate.heap()->IsParked()); + bool job_was_executed = false; while (!incoming_queue()->IsEmpty() && !delegate->ShouldYield()) { std::unique_ptr job; if (!incoming_queue()->Dequeue(&job)) break; @@ -168,20 +169,32 @@ class MaglevConcurrentDispatcher::JobTask final : public v8::JobTask { RuntimeCallStats* rcs = nullptr; // TODO(v8:7700): 
Implement. CompilationJob::Status status = job->ExecuteJob(rcs, &local_isolate); if (status == CompilationJob::SUCCEEDED) { + job_was_executed = true; outgoing_queue()->Enqueue(std::move(job)); } } - isolate()->stack_guard()->RequestInstallMaglevCode(); + if (job_was_executed) { + isolate()->stack_guard()->RequestInstallMaglevCode(); + } + // Maglev jobs aren't cheap to destruct, so destroy them here in the + // background thread rather than on the main thread. + while (!destruction_queue()->IsEmpty() && !delegate->ShouldYield()) { + std::unique_ptr job; + if (!destruction_queue()->Dequeue(&job)) break; + DCHECK_NOT_NULL(job); + job.reset(); + } } size_t GetMaxConcurrency(size_t) const override { - return incoming_queue()->size(); + return incoming_queue()->size() + destruction_queue()->size(); } private: Isolate* isolate() const { return dispatcher_->isolate_; } QueueT* incoming_queue() const { return &dispatcher_->incoming_queue_; } QueueT* outgoing_queue() const { return &dispatcher_->outgoing_queue_; } + QueueT* destruction_queue() const { return &dispatcher_->destruction_queue_; } MaglevConcurrentDispatcher* const dispatcher_; const Handle function_; @@ -224,6 +237,9 @@ void MaglevConcurrentDispatcher::FinalizeFinishedJobs() { "V8.MaglevConcurrentFinalize", job.get(), TRACE_EVENT_FLAG_FLOW_IN); Compiler::FinalizeMaglevCompilationJob(job.get(), isolate_); + // Maglev jobs aren't cheap to destruct, so re-enqueue them for destruction + // on a background thread. 
+ destruction_queue_.Enqueue(std::move(job)); } } diff --git a/src/maglev/maglev-concurrent-dispatcher.h b/src/maglev/maglev-concurrent-dispatcher.h index 28343f5dd5..20a355f211 100644 --- a/src/maglev/maglev-concurrent-dispatcher.h +++ b/src/maglev/maglev-concurrent-dispatcher.h @@ -100,6 +100,7 @@ class MaglevConcurrentDispatcher final { std::unique_ptr job_handle_; QueueT incoming_queue_; QueueT outgoing_queue_; + QueueT destruction_queue_; }; } // namespace maglev From 39abc766995a96258a7c56a152cc7eeb47ec32d6 Mon Sep 17 00:00:00 2001 From: Lu Yahan Date: Wed, 21 Dec 2022 10:32:12 +0800 Subject: [PATCH 020/654] [riscv][centry] Remove the unused SaveFPRegsMode parameter Port commit 605e46479aca3449a6ba1350a1de7927c76b86ad Bug: v8:13606 Change-Id: I0f700a2607860ad93be3b2f8492f5822e48b0c3f Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4119766 Auto-Submit: Yahan Lu Reviewed-by: ji qiu Commit-Queue: ji qiu Cr-Commit-Position: refs/heads/main@{#84971} --- src/builtins/riscv/builtins-riscv.cc | 17 +++------ src/codegen/riscv/macro-assembler-riscv.cc | 44 ++++------------------ src/codegen/riscv/macro-assembler-riscv.h | 18 ++++----- 3 files changed, 20 insertions(+), 59 deletions(-) diff --git a/src/builtins/riscv/builtins-riscv.cc b/src/builtins/riscv/builtins-riscv.cc index fa599ffac3..95deaed31f 100644 --- a/src/builtins/riscv/builtins-riscv.cc +++ b/src/builtins/riscv/builtins-riscv.cc @@ -2783,8 +2783,7 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) { #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, - SaveFPRegsMode save_doubles, ArgvMode argv_mode, - bool builtin_exit_frame) { + ArgvMode argv_mode, bool builtin_exit_frame) { // Called from JavaScript; parameters are on stack as if calling JS function // a0: number of arguments including receiver // a1: pointer to builtin function @@ -2807,8 +2806,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // 
Enter the exit frame that transitions from JavaScript to C++. FrameScope scope(masm, StackFrame::MANUAL); __ EnterExitFrame( - save_doubles == SaveFPRegsMode::kSave, 0, - builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); + 0, builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); // s3: number of arguments including receiver (C callee-saved) // s1: pointer to first argument (C callee-saved) @@ -2861,7 +2859,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, ? no_reg // s3: still holds argc (callee-saved). : s3; - __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN); + __ LeaveExitFrame(argc, EMIT_RETURN); // Handling of exception. __ bind(&exception_returned); @@ -3162,10 +3160,8 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, __ LoadWord(s3, *stack_space_operand); } - static constexpr bool kDontSaveDoubles = false; static constexpr bool kRegisterContainsSlotCount = false; - __ LeaveExitFrame(kDontSaveDoubles, s3, NO_EMIT_RETURN, - kRegisterContainsSlotCount); + __ LeaveExitFrame(s3, NO_EMIT_RETURN, kRegisterContainsSlotCount); // Check if the function scheduled an exception. __ LoadRoot(a4, RootIndex::kTheHoleValue); @@ -3270,9 +3266,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // Allocate the v8::Arguments structure in the arguments' space since // it's not controlled by GC. static constexpr int kApiStackSpace = 4; - static constexpr bool kDontSaveDoubles = false; FrameScope frame_scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace); + __ EnterExitFrame(kApiStackSpace); // EnterExitFrame may align the sp. 
@@ -3376,7 +3371,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { const int kApiStackSpace = 1; FrameScope frame_scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(false, kApiStackSpace); + __ EnterExitFrame(kApiStackSpace); // Create v8::PropertyCallbackInfo object on the stack and initialize // it's args_ field. diff --git a/src/codegen/riscv/macro-assembler-riscv.cc b/src/codegen/riscv/macro-assembler-riscv.cc index e9ac398752..2c8caca0c3 100644 --- a/src/codegen/riscv/macro-assembler-riscv.cc +++ b/src/codegen/riscv/macro-assembler-riscv.cc @@ -5326,8 +5326,8 @@ void TurboAssembler::MulOverflow32(Register dst, Register left, } #endif -void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles) { +void MacroAssembler::CallRuntime(const Runtime::Function* f, + int num_arguments) { ASM_CODE_COMMENT(this); // All parameters are on the stack. a0 has the return value after call. @@ -5342,8 +5342,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, // smarter. 
PrepareCEntryArgs(num_arguments); PrepareCEntryFunction(ExternalReference::Create(f)); - Handle code = - CodeFactory::CEntry(isolate(), f->result_size, save_doubles); + Handle code = CodeFactory::CEntry(isolate(), f->result_size); Call(code, RelocInfo::CODE_TARGET); } @@ -5361,8 +5360,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin, bool builtin_exit_frame) { ASM_CODE_COMMENT(this); PrepareCEntryFunction(builtin); - Handle code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore, - ArgvMode::kStack, builtin_exit_frame); + Handle code = + CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame); Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg)); } @@ -5547,7 +5546,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) { LoadWord(fp, MemOperand(fp, 0 * kSystemPointerSize)); } -void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, +void MacroAssembler::EnterExitFrame(int stack_space, StackFrame::Type frame_type) { ASM_CODE_COMMENT(this); DCHECK(frame_type == StackFrame::EXIT || @@ -5600,19 +5599,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, } const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); - if (save_doubles) { - // The stack is already aligned to 0 modulo 8 for stores with sdc1. 
- int space = kNumCallerSavedFPU * kDoubleSize; - SubWord(sp, sp, Operand(space)); - int count = 0; - for (int i = 0; i < kNumFPURegisters; i++) { - if (kCallerSavedFPU.bits() & (1 << i)) { - FPURegister reg = FPURegister::from_code(i); - StoreDouble(reg, MemOperand(sp, count * kDoubleSize)); - count++; - } - } - } // Reserve place for the return address, stack space and an optional slot // (used by DirectCEntry to hold the return value if a struct is @@ -5632,28 +5618,12 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, StoreWord(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); } -void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, - bool do_return, +void MacroAssembler::LeaveExitFrame(Register argument_count, bool do_return, bool argument_count_is_length) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); BlockTrampolinePoolScope block_trampoline_pool(this); - // Optionally restore all double registers. - if (save_doubles) { - // Remember: we only need to restore kCallerSavedFPU. - SubWord(scratch, fp, - Operand(ExitFrameConstants::kFixedFrameSizeFromFp + - kNumCallerSavedFPU * kDoubleSize)); - int cout = 0; - for (int i = 0; i < kNumFPURegisters; i++) { - if (kCalleeSavedFPU.bits() & (1 << i)) { - FPURegister reg = FPURegister::from_code(i); - LoadDouble(reg, MemOperand(scratch, cout * kDoubleSize)); - cout++; - } - } - } // Clear top frame. li(scratch, diff --git a/src/codegen/riscv/macro-assembler-riscv.h b/src/codegen/riscv/macro-assembler-riscv.h index f7870ca7b2..6fc51271fa 100644 --- a/src/codegen/riscv/macro-assembler-riscv.h +++ b/src/codegen/riscv/macro-assembler-riscv.h @@ -1314,12 +1314,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // argc - argument count to be dropped by LeaveExitFrame. // save_doubles - saves FPU registers on stack. // stack_space - extra stack space. 
- void EnterExitFrame(bool save_doubles, int stack_space = 0, + void EnterExitFrame(int stack_space = 0, StackFrame::Type frame_type = StackFrame::EXIT); // Leave the current exit frame. - void LeaveExitFrame(bool save_doubles, Register arg_count, - bool do_return = NO_EMIT_RETURN, + void LeaveExitFrame(Register arg_count, bool do_return = NO_EMIT_RETURN, bool argument_count_is_length = false); // Make sure the stack is aligned. Only emits code in debug mode. @@ -1391,20 +1390,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // Runtime calls. // Call a runtime routine. - void CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore); + void CallRuntime(const Runtime::Function* f, int num_arguments); // Convenience function: Same as above, but takes the fid instead. - void CallRuntime(Runtime::FunctionId fid, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { + void CallRuntime(Runtime::FunctionId fid) { const Runtime::Function* function = Runtime::FunctionForId(fid); - CallRuntime(function, function->nargs, save_doubles); + CallRuntime(function, function->nargs); } // Convenience function: Same as above, but takes the fid instead. - void CallRuntime(Runtime::FunctionId fid, int num_arguments, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { - CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles); + void CallRuntime(Runtime::FunctionId fid, int num_arguments) { + CallRuntime(Runtime::FunctionForId(fid), num_arguments); } // Convenience function: tail call a runtime routine (jump). From 24da07944402e24817ef3d9951689de099eea390 Mon Sep 17 00:00:00 2001 From: Nikolaos Papaspyrou Date: Wed, 21 Dec 2022 10:32:25 +0000 Subject: [PATCH 021/654] Revert "[heap] Merge mechanisms for disabling CSS" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit f51e0bb1db67cfa1b4ac11b13e5cbee0b8601149. 
Reason for revert: Dependent on crrev.com/c/4092737 that is being reverted. Original change's description: > [heap] Merge mechanisms for disabling CSS > > EmbedderStackStateScope is used to disable conservative stack scanning > for cppgc when the stack is known to not contain heap pointers. Also, > DisableConservativeStackScanningScopeForTesting is used to disable CSS > for the V8 heap in tests that assume a precise GC. Until now, these two > have used two different mechanisms for disabling CSS. This CL merges > the two mechanisms and implements the latter scope via the former. > > Bug: v8:13257 > Change-Id: Ieca082657854fe2eff9eb5d95a30d48bb8eab44f > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4111954 > Reviewed-by: Michael Lippautz > Commit-Queue: Nikolaos Papaspyrou > Cr-Commit-Position: refs/heads/main@{#84964} Bug: v8:13257 Change-Id: Id769af6215a2ed319ec96b354734a5362b2384cf No-Presubmit: true No-Tree-Checks: true No-Try: true Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4111179 Commit-Queue: Nikolaos Papaspyrou Reviewed-by: Dominik Inführ Cr-Commit-Position: refs/heads/main@{#84972} --- src/heap/heap.cc | 16 +++++----- src/heap/heap.h | 29 ++++++++++++------- src/heap/mark-compact.cc | 3 +- .../heap/cppgc-js/unified-heap-unittest.cc | 22 +++++--------- .../heap/embedder-tracing-unittest.cc | 12 ++++++++ 5 files changed, 48 insertions(+), 34 deletions(-) diff --git a/src/heap/heap.cc b/src/heap/heap.cc index e9d83704c4..0852c8b8fa 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -4872,16 +4872,16 @@ void Heap::IterateStackRoots(RootVisitor* v, StackState stack_state, isolate()->Iterate(v); #ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING - if (stack_state == StackState::kNoHeapPointers || !IsGCWithStack()) return; + if (stack_state == StackState::kNoHeapPointers || + disable_conservative_stack_scanning_for_testing_) + return; // In case of a shared GC, we're interested in the main isolate for CSS. 
- Isolate* main_isolate; - if (mode == IterateRootsMode::kShared) { - main_isolate = isolate()->shared_heap_isolate(); - if (!main_isolate->heap()->IsGCWithStack()) return; - } else { - main_isolate = isolate(); - } + Isolate* main_isolate = mode == IterateRootsMode::kShared + ? isolate()->shared_heap_isolate() + : isolate(); + if (main_isolate->heap()->disable_conservative_stack_scanning_for_testing_) + return; ConservativeStackVisitor stack_visitor(main_isolate, v); stack().IteratePointers(&stack_visitor); diff --git a/src/heap/heap.h b/src/heap/heap.h index ff97369f80..fe29659ce6 100644 --- a/src/heap/heap.h +++ b/src/heap/heap.h @@ -2409,6 +2409,7 @@ class Heap { bool force_oom_ = false; bool force_gc_on_next_allocation_ = false; bool delay_sweeper_tasks_for_testing_ = false; + bool disable_conservative_stack_scanning_for_testing_ = false; UnorderedHeapObjectMap retainer_; UnorderedHeapObjectMap retaining_root_; @@ -2685,6 +2686,23 @@ class V8_EXPORT_PRIVATE V8_NODISCARD SaveStackContextScope { ::heap::base::Stack* stack_; }; +class V8_NODISCARD DisableConservativeStackScanningScopeForTesting { + public: + explicit inline DisableConservativeStackScanningScopeForTesting(Heap* heap) + : heap_(heap), + old_value_(heap_->disable_conservative_stack_scanning_for_testing_) { + heap_->disable_conservative_stack_scanning_for_testing_ = true; + } + + inline ~DisableConservativeStackScanningScopeForTesting() { + heap_->disable_conservative_stack_scanning_for_testing_ = old_value_; + } + + private: + Heap* heap_; + bool old_value_; +}; + // Space iterator for iterating over all the paged spaces of the heap: Map // space, old space and code space. Returns each space in turn, and null when it // is done. 
@@ -2840,17 +2858,6 @@ class V8_EXPORT_PRIVATE V8_NODISCARD EmbedderStackStateScope final { const StackState old_stack_state_; }; -class V8_NODISCARD DisableConservativeStackScanningScopeForTesting { - public: - explicit inline DisableConservativeStackScanningScopeForTesting(Heap* heap) - : embedder_scope_(EmbedderStackStateScope::ExplicitScopeForTesting( - heap->local_embedder_heap_tracer(), - cppgc::EmbedderStackState::kNoHeapPointers)) {} - - private: - EmbedderStackStateScope embedder_scope_; -}; - class V8_NODISCARD CppClassNamesAsHeapObjectNameScope final { public: explicit CppClassNamesAsHeapObjectNameScope(v8::CppHeap* heap); diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc index 7278980310..49cbaea44f 100644 --- a/src/heap/mark-compact.cc +++ b/src/heap/mark-compact.cc @@ -2115,7 +2115,8 @@ void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor) { // v8::TracedReference alive from the stack. This is only needed when using // `EmbedderHeapTracer` and not using `CppHeap`. auto& stack = heap()->stack(); - if (heap_->IsGCWithStack()) { + if (heap_->local_embedder_heap_tracer()->embedder_stack_state() == + cppgc::EmbedderStackState::kMayContainHeapPointers) { ConservativeTracedHandlesMarkingVisitor conservative_marker( *heap_, *local_marking_worklists_, cppgc::internal::CollectionType::kMajor); diff --git a/test/unittests/heap/cppgc-js/unified-heap-unittest.cc b/test/unittests/heap/cppgc-js/unified-heap-unittest.cc index 3934eb8b00..3439542e3b 100644 --- a/test/unittests/heap/cppgc-js/unified-heap-unittest.cc +++ b/test/unittests/heap/cppgc-js/unified-heap-unittest.cc @@ -517,6 +517,12 @@ V8_NOINLINE void StackToHeapTest(v8::Isolate* v8_isolate, Operation op, // Disable scanning, assuming the slots are overwritten. 
DisableConservativeStackScanningScopeForTesting no_stack_scanning( reinterpret_cast(v8_isolate)->heap()); + EmbedderStackStateScope scope = + EmbedderStackStateScope::ExplicitScopeForTesting( + reinterpret_cast(v8_isolate) + ->heap() + ->local_embedder_heap_tracer(), + cppgc::EmbedderStackState::kNoHeapPointers); FullGC(v8_isolate); } ASSERT_TRUE(observer.IsEmpty()); @@ -559,13 +565,7 @@ V8_NOINLINE void HeapToStackTest(v8::Isolate* v8_isolate, Operation op, FullGC(v8_isolate); EXPECT_FALSE(observer.IsEmpty()); stack_handle.Reset(); - { - // Conservative scanning may find stale pointers to on-stack handles. - // Disable scanning, assuming the slots are overwritten. - DisableConservativeStackScanningScopeForTesting no_stack_scanning( - reinterpret_cast(v8_isolate)->heap()); - FullGC(v8_isolate); - } + FullGC(v8_isolate); EXPECT_TRUE(observer.IsEmpty()); } @@ -603,13 +603,7 @@ V8_NOINLINE void StackToStackTest(v8::Isolate* v8_isolate, Operation op, FullGC(v8_isolate); EXPECT_FALSE(observer.IsEmpty()); stack_handle2.Reset(); - { - // Conservative scanning may find stale pointers to on-stack handles. - // Disable scanning, assuming the slots are overwritten. - DisableConservativeStackScanningScopeForTesting no_stack_scanning( - reinterpret_cast(v8_isolate)->heap()); - FullGC(v8_isolate); - } + FullGC(v8_isolate); EXPECT_TRUE(observer.IsEmpty()); } diff --git a/test/unittests/heap/embedder-tracing-unittest.cc b/test/unittests/heap/embedder-tracing-unittest.cc index 5342c97260..d2a54aecc2 100644 --- a/test/unittests/heap/embedder-tracing-unittest.cc +++ b/test/unittests/heap/embedder-tracing-unittest.cc @@ -484,6 +484,12 @@ TEST_F(EmbedderTracingTest, TracedReferenceHandlesMarking) { // Disable scanning, assuming the slots are overwritten. 
DisableConservativeStackScanningScopeForTesting no_stack_scanning( i_isolate()->heap()); + EmbedderStackStateScope scope = + EmbedderStackStateScope::ExplicitScopeForTesting( + reinterpret_cast(v8_isolate()) + ->heap() + ->local_embedder_heap_tracer(), + EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers); FullGC(); } const size_t final_count = traced_handles->used_node_count(); @@ -584,6 +590,12 @@ TEST_F(EmbedderTracingTest, BasicTracedReference) { // Disable scanning, assuming the slots are overwritten. DisableConservativeStackScanningScopeForTesting no_stack_scanning( i_isolate()->heap()); + EmbedderStackStateScope scope = + EmbedderStackStateScope::ExplicitScopeForTesting( + reinterpret_cast(v8_isolate()) + ->heap() + ->local_embedder_heap_tracer(), + EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers); FullGC(); } EXPECT_EQ(initial_count, traced_handles->used_node_count()); From 53e7cf253a6ea7e7ae25008425898fd324cca671 Mon Sep 17 00:00:00 2001 From: Nikolaos Papaspyrou Date: Wed, 21 Dec 2022 10:05:23 +0000 Subject: [PATCH 022/654] Revert "Reland "[heap] Fix conservative stack scanning for client isolates"" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 2a71e8637f0ee70d11873abef7cda8c772f7a8ad. Reason for revert: This probably blocked the V8 roll. https://chromium-review.googlesource.com/c/chromium/src/+/4116621?tab=checks Original change's description: > Reland "[heap] Fix conservative stack scanning for client isolates" > > This is a reland of commit 36bac1bcae1020fce5b9dfe54522d33df80b2dd6 > > Original change's description: > > [heap] Fix conservative stack scanning for client isolates > > > > With this CL, the context of stacks corresponding to all client isolates > > are saved, so that conservative stack scanning can be used correctly > > during a shared garbage collection. 
This happens: > > > > 1) in Heap::PerformSharedGarbageCollection, for the stacks of the shared > > isolate and the initiator; > > 2) when an isolate's main thread is waiting in a safepoint; and > > 3) when an isolate's main thread is parked. > > > > Bug: v8:13257 > > Change-Id: I9ff060f2c0c1ec12977c70d67d65d9c543e2d165 > > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4027210 > > Reviewed-by: Michael Lippautz > > Reviewed-by: Omer Katz > > Commit-Queue: Nikolaos Papaspyrou > > Reviewed-by: Dominik Inführ > > Cr-Commit-Position: refs/heads/main@{#84712} > > Bug: v8:13257 > Change-Id: I61df6eeca5a28e04eb3a858f7d601bc5f6312e49 > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4092737 > Reviewed-by: Dominik Inführ > Reviewed-by: Omer Katz > Commit-Queue: Nikolaos Papaspyrou > Cr-Commit-Position: refs/heads/main@{#84963} Bug: v8:13257 Change-Id: I3a235f11e5fe55c476591a5274946aeb6cc9bf6e No-Presubmit: true No-Tree-Checks: true No-Try: true Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4111178 Bot-Commit: Rubber Stamper Reviewed-by: Dominik Inführ Commit-Queue: Nikolaos Papaspyrou Cr-Commit-Position: refs/heads/main@{#84973} --- src/heap/heap.cc | 47 +-- src/heap/heap.h | 12 +- src/heap/local-heap.cc | 23 +- src/heap/local-heap.h | 19 - .../cctest/heap/test-concurrent-allocation.cc | 2 +- test/cctest/heap/test-heap.cc | 23 +- .../conservative-stack-visitor-unittest.cc | 393 ------------------ 7 files changed, 37 insertions(+), 482 deletions(-) diff --git a/src/heap/heap.cc b/src/heap/heap.cc index 0852c8b8fa..3c172a09c0 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -2385,8 +2385,6 @@ void Heap::PerformSharedGarbageCollection(Isolate* initiator, DCHECK_NOT_NULL(isolate()->global_safepoint()); SaveStackContextScope stack_context_scope(&stack()); - SaveStackContextScope initiator_stack_context_scope( - &initiator->thread_local_top()->stack_); isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) 
{ client->heap()->FreeSharedLinearAllocationAreas(); @@ -4623,8 +4621,7 @@ class ClearStaleLeftTrimmedHandlesVisitor : public RootVisitor { #endif // V8_COMPRESS_POINTERS }; -void Heap::IterateRoots(RootVisitor* v, base::EnumSet options, - IterateRootsMode mode) { +void Heap::IterateRoots(RootVisitor* v, base::EnumSet options) { v->VisitRootPointers(Root::kStrongRootList, nullptr, roots_table().strong_roots_begin(), roots_table().strong_roots_end()); @@ -4694,7 +4691,7 @@ void Heap::IterateRoots(RootVisitor* v, base::EnumSet options, StackState stack_state = options.contains(SkipRoot::kConservativeStack) ? StackState::kNoHeapPointers : StackState::kMayContainHeapPointers; - IterateStackRoots(v, stack_state, mode); + IterateStackRoots(v, stack_state); v->Synchronize(VisitorSynchronization::kStackRoots); } @@ -4815,13 +4812,13 @@ void Heap::IterateRootsIncludingClients(RootVisitor* v, if (isolate()->is_shared_heap_isolate()) { ClientRootVisitor client_root_visitor(v); - // When iterating roots of clients, we assume it will be necessary to scan - // their stacks conservatively regardless of the main isolate's stack state. - options.Remove(SkipRoot::kConservativeStack); + // TODO(v8:13257): We cannot run CSS on client isolates now, as the + // stack markers will not be correct. 
+ options.Add(SkipRoot::kConservativeStack); isolate()->global_safepoint()->IterateClientIsolates( [v = &client_root_visitor, options](Isolate* client) { if (client->is_shared_heap_isolate()) return; - client->heap()->IterateRoots(v, options, IterateRootsMode::kShared); + client->heap()->IterateRoots(v, options); }); } } @@ -4834,12 +4831,9 @@ void Heap::IterateRootsFromStackIncludingClients(RootVisitor* v, ClientRootVisitor client_root_visitor(v); isolate()->global_safepoint()->IterateClientIsolates( [v = &client_root_visitor](Isolate* client) { - if (client->is_shared_heap_isolate()) return; - // When iterating stack roots of clients, we assume they may contain - // heap pointers regardless of the main isolate's stack state. - client->heap()->IterateStackRoots(v, - StackState::kMayContainHeapPointers, - IterateRootsMode::kShared); + // TODO(v8:13257): We cannot run CSS on client isolates now, as the + // stack markers will not be correct. + client->heap()->IterateStackRoots(v, StackState::kNoHeapPointers); }); } } @@ -4867,24 +4861,15 @@ void Heap::IterateBuiltins(RootVisitor* v) { static_assert(Builtins::AllBuiltinsAreIsolateIndependent()); } -void Heap::IterateStackRoots(RootVisitor* v, StackState stack_state, - IterateRootsMode mode) { - isolate()->Iterate(v); +void Heap::IterateStackRoots(RootVisitor* v, StackState stack_state) { + isolate_->Iterate(v); #ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING - if (stack_state == StackState::kNoHeapPointers || - disable_conservative_stack_scanning_for_testing_) - return; - - // In case of a shared GC, we're interested in the main isolate for CSS. - Isolate* main_isolate = mode == IterateRootsMode::kShared - ? 
isolate()->shared_heap_isolate() - : isolate(); - if (main_isolate->heap()->disable_conservative_stack_scanning_for_testing_) - return; - - ConservativeStackVisitor stack_visitor(main_isolate, v); - stack().IteratePointers(&stack_visitor); + if (stack_state == StackState::kMayContainHeapPointers && + !disable_conservative_stack_scanning_for_testing_) { + ConservativeStackVisitor stack_visitor(isolate(), v); + stack().IteratePointers(&stack_visitor); + } #endif // V8_ENABLE_CONSERVATIVE_STACK_SCANNING } diff --git a/src/heap/heap.h b/src/heap/heap.h index fe29659ce6..39e6efb080 100644 --- a/src/heap/heap.h +++ b/src/heap/heap.h @@ -1039,13 +1039,8 @@ class Heap { // garbage collection and is usually only performed as part of // (de)serialization or heap verification. - // In the case of shared GC, kLocal is used for the main isolate and kShared - // for the (other) client isolates. - enum class IterateRootsMode { kLocal, kShared }; - // Iterates over the strong roots and the weak roots. - void IterateRoots(RootVisitor* v, base::EnumSet options, - IterateRootsMode mode = IterateRootsMode::kLocal); + void IterateRoots(RootVisitor* v, base::EnumSet options); void IterateRootsIncludingClients(RootVisitor* v, base::EnumSet options); void IterateRootsFromStackIncludingClients(RootVisitor* v, @@ -1058,8 +1053,7 @@ class Heap { void IterateWeakRoots(RootVisitor* v, base::EnumSet options); void IterateWeakGlobalHandles(RootVisitor* v); void IterateBuiltins(RootVisitor* v); - void IterateStackRoots(RootVisitor* v, StackState stack_state, - IterateRootsMode mode = IterateRootsMode::kLocal); + void IterateStackRoots(RootVisitor* v, StackState stack_state); // =========================================================================== // Remembered set API. 
======================================================= @@ -2698,7 +2692,7 @@ class V8_NODISCARD DisableConservativeStackScanningScopeForTesting { heap_->disable_conservative_stack_scanning_for_testing_ = old_value_; } - private: + protected: Heap* heap_; bool old_value_; }; diff --git a/src/heap/local-heap.cc b/src/heap/local-heap.cc index 35b3b74c85..ab357ef9e3 100644 --- a/src/heap/local-heap.cc +++ b/src/heap/local-heap.cc @@ -239,9 +239,7 @@ void LocalHeap::ParkSlowPath() { if (current_state.IsCollectionRequested()) { if (!heap()->ignore_local_gc_requests()) { - ClearStackContext(); heap_->CollectGarbageForBackground(this); - SaveStackContext(); continue; } @@ -296,9 +294,7 @@ void LocalHeap::UnparkSlowPath() { continue; if (!heap()->ignore_local_gc_requests()) { - ClearStackContext(); heap_->CollectGarbageForBackground(this); - SaveStackContext(); } return; @@ -369,19 +365,14 @@ void LocalHeap::SleepInSafepoint() { TRACE_GC1(heap_->tracer(), scope_id, thread_kind); - { - base::Optional stack_context_scope; - if (is_main_thread()) stack_context_scope.emplace(&heap_->stack()); + // Parking the running thread here is an optimization. We do not need to + // wake this thread up to reach the next safepoint. + ThreadState old_state = state_.SetParked(); + CHECK(old_state.IsRunning()); + CHECK(old_state.IsSafepointRequested()); + CHECK_IMPLIES(old_state.IsCollectionRequested(), is_main_thread()); - // Parking the running thread here is an optimization. We do not need to - // wake this thread up to reach the next safepoint. 
- ThreadState old_state = state_.SetParked(); - CHECK(old_state.IsRunning()); - CHECK(old_state.IsSafepointRequested()); - CHECK_IMPLIES(old_state.IsCollectionRequested(), is_main_thread()); - - heap_->safepoint()->WaitInSafepoint(); - } + heap_->safepoint()->WaitInSafepoint(); base::Optional ignore_gc_requests; if (is_main_thread()) ignore_gc_requests.emplace(heap()); diff --git a/src/heap/local-heap.h b/src/heap/local-heap.h index 982a103434..744dc2edda 100644 --- a/src/heap/local-heap.h +++ b/src/heap/local-heap.h @@ -282,7 +282,6 @@ class V8_EXPORT_PRIVATE LocalHeap { void Park() { DCHECK(AllowSafepoints::IsAllowed()); - SaveStackContextIfMainThread(); ThreadState expected = ThreadState::Running(); if (!state_.CompareExchangeWeak(expected, ThreadState::Parked())) { ParkSlowPath(); @@ -295,7 +294,6 @@ class V8_EXPORT_PRIVATE LocalHeap { if (!state_.CompareExchangeWeak(expected, ThreadState::Running())) { UnparkSlowPath(); } - ClearStackContextIfMainThread(); } void ParkSlowPath(); @@ -314,21 +312,6 @@ class V8_EXPORT_PRIVATE LocalHeap { void SetUp(); void SetUpSharedMarking(); - void SaveStackContext() { - DCHECK(!stack_context_scope_.has_value()); - stack_context_scope_.emplace(&heap_->stack()); - } - - void SaveStackContextIfMainThread() { - if (is_main_thread()) SaveStackContext(); - } - - void ClearStackContext() { stack_context_scope_.reset(); } - - void ClearStackContextIfMainThread() { - if (is_main_thread()) ClearStackContext(); - } - Heap* heap_; bool is_main_thread_; @@ -355,8 +338,6 @@ class V8_EXPORT_PRIVATE LocalHeap { MarkingBarrier* saved_marking_barrier_ = nullptr; - base::Optional stack_context_scope_; - friend class CollectionBarrier; friend class ConcurrentAllocator; friend class GlobalSafepoint; diff --git a/test/cctest/heap/test-concurrent-allocation.cc b/test/cctest/heap/test-concurrent-allocation.cc index 08f1aebc05..79ca12cc32 100644 --- a/test/cctest/heap/test-concurrent-allocation.cc +++ b/test/cctest/heap/test-concurrent-allocation.cc 
@@ -182,7 +182,7 @@ UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadParksAndUnparks) { threads.push_back(std::move(thread)); } - for (int i = 0; i < 30'000; i++) { + for (int i = 0; i < 300'000; i++) { ParkedScope scope(i_isolate->main_thread_local_isolate()); } diff --git a/test/cctest/heap/test-heap.cc b/test/cctest/heap/test-heap.cc index df280cd61e..77ff826998 100644 --- a/test/cctest/heap/test-heap.cc +++ b/test/cctest/heap/test-heap.cc @@ -6870,20 +6870,17 @@ UNINITIALIZED_TEST(RestoreHeapLimit) { heap->AutomaticallyRestoreInitialHeapLimit(0.5); const int kFixedArrayLength = 1000000; { - DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap); - { - HandleScope handle_scope(isolate); - while (!state.oom_triggered) { - factory->NewFixedArray(kFixedArrayLength); - } + HandleScope handle_scope(isolate); + while (!state.oom_triggered) { + factory->NewFixedArray(kFixedArrayLength); } - heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true); - state.oom_triggered = false; - { - HandleScope handle_scope(isolate); - while (!state.oom_triggered) { - factory->NewFixedArray(kFixedArrayLength); - } + } + heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true); + state.oom_triggered = false; + { + HandleScope handle_scope(isolate); + while (!state.oom_triggered) { + factory->NewFixedArray(kFixedArrayLength); } } CHECK_EQ(state.current_heap_limit, state.initial_heap_limit); diff --git a/test/unittests/heap/conservative-stack-visitor-unittest.cc b/test/unittests/heap/conservative-stack-visitor-unittest.cc index b43f596d4c..f8cfc94162 100644 --- a/test/unittests/heap/conservative-stack-visitor-unittest.cc +++ b/test/unittests/heap/conservative-stack-visitor-unittest.cc @@ -4,9 +4,6 @@ #include "src/heap/conservative-stack-visitor.h" -#include "src/base/optional.h" -#include "src/base/platform/semaphore.h" -#include "src/heap/parked-scope.h" #include "test/unittests/heap/heap-utils.h" #include "test/unittests/test-utils.h" 
@@ -236,395 +233,5 @@ TEST_F(ConservativeStackVisitorTest, InnerHalfWord2) { #endif // V8_COMPRESS_POINTERS -#if V8_CAN_CREATE_SHARED_HEAP_BOOL - -using ConservativeStackScanningSharedTest = TestJSSharedMemoryWithIsolate; - -namespace { - -// An abstract class for threads that will be used in tests related to -// conservative stack scanning of the shared heap. When running, after -// initialization, it invokes the virtual method `RunTheTest`. The class -// provides basic functionality for allocating an object on the shared heap, -// synchronizing with the main thread (which triggers a GC), and verifying that -// the object has (or has not) been reclaimed by the GC. -class TestStackContextWithSharedHeapThread : public ParkingThread { - public: - TestStackContextWithSharedHeapThread(const char* name, StackState stack_state, - v8::base::Semaphore* sema_ready, - v8::base::Semaphore* sema_gc_complete) - - : ParkingThread(base::Thread::Options(name)), - stack_state_(stack_state), - sema_ready_(sema_ready), - sema_gc_complete_(sema_gc_complete) {} - - void Run() override { - IsolateWrapper isolate_wrapper(kNoCounters); - Isolate* i_client_isolate = - reinterpret_cast(isolate_wrapper.isolate()); - - base::Optional scope; - if (stack_state_ == StackState::kNoHeapPointers) - scope.emplace(i_client_isolate->heap()); - - RunTheTest(i_client_isolate); - } - - virtual void RunTheTest(Isolate* i_client_isolate) = 0; - - // Signal to the main thread to invoke a shared GC, then wait in a safepoint - // until the GC is finished. - V8_INLINE void SignalReadyAndWait(Isolate* i_client_isolate) { - sema_ready_->Signal(); - const auto timeout = base::TimeDelta::FromMilliseconds(100); - do { - i_client_isolate->main_thread_local_isolate()->heap()->Safepoint(); - } while (!sema_gc_complete_->WaitFor(timeout)); - } - - // Allocate an object on the shared heap and add a weak reference. - // Also, allocate some garbage. Return the address of the allocated object. 
- V8_INLINE Address AllocateObjectAndGarbage(Isolate* i_client_isolate, - Persistent& weak) { - HandleScope handle_scope(i_client_isolate); - Handle h = i_client_isolate->factory()->NewFixedArray( - 256, AllocationType::kSharedOld); - weak.Reset(reinterpret_cast(i_client_isolate), - Utils::FixedArrayToLocal(h)); - weak.SetWeak(); - - // Allocate some garbage on the shared heap. - for (int i = 0; i < 10; ++i) { - i_client_isolate->factory()->NewFixedArray(256, - AllocationType::kSharedOld); - } - - return h->GetHeapObject().address(); - } - - // Check whether an object has been reclaimed by GC. - V8_INLINE void VerifyObject(const Persistent& weak) { - switch (stack_state_) { - case StackState::kNoHeapPointers: - EXPECT_TRUE(weak.IsEmpty()); - break; - case StackState::kMayContainHeapPointers: - EXPECT_FALSE(weak.IsEmpty()); - break; - } - } - - bool IsPreciseGC() const { - return stack_state_ == StackState::kNoHeapPointers; - } - - private: - StackState stack_state_; - v8::base::Semaphore* sema_ready_; - v8::base::Semaphore* sema_gc_complete_; -}; - -// Generic test template for conservative stack scanning of the shared heap. The -// `TestThread` must be a subclass of `TestStackContextWithSharedHeapThread`. -template -void StackContextWithSharedHeapTest(Isolate* isolate, StackState stack_state) { - v8::base::Semaphore sema_thread_ready(0); - v8::base::Semaphore sema_gc_complete(0); - - auto thread = std::make_unique(stack_state, &sema_thread_ready, - &sema_gc_complete); - CHECK(thread->Start()); - - // Wait for the thread to be ready. - sema_thread_ready.Wait(); - - // Invoke shared garbage collection. - isolate->heap()->CollectGarbageShared(isolate->main_thread_local_heap(), - GarbageCollectionReason::kTesting); - - // Signal that the GC has been complete. - sema_gc_complete.Signal(); - - ParkedScope scope(isolate->main_thread_local_isolate()); - thread->ParkedJoin(scope); -} - -// Test scenario #1: The thread just waits, so it is forced into a safepoint. 
-class TestWaitThread final : public TestStackContextWithSharedHeapThread { - public: - TestWaitThread(StackState stack_state, v8::base::Semaphore* sema_ready, - v8::base::Semaphore* sema_gc_complete) - : TestStackContextWithSharedHeapThread("TestWaitThread", stack_state, - sema_ready, sema_gc_complete) {} - - void RunTheTest(Isolate* i_client_isolate) override { - Persistent weak; - volatile Address ptr_on_stack = - AllocateObjectAndGarbage(i_client_isolate, weak); - - SignalReadyAndWait(i_client_isolate); - - // Make sure to keep the pointer alive. - EXPECT_NE(static_cast(0), ptr_on_stack); - - VerifyObject(weak); - } -}; - -// Test scenario #2: The thread parks and waits. -class TestParkWaitThread final : public TestStackContextWithSharedHeapThread { - public: - TestParkWaitThread(StackState stack_state, v8::base::Semaphore* sema_ready, - v8::base::Semaphore* sema_gc_complete) - : TestStackContextWithSharedHeapThread("TestParkWaitThread", stack_state, - sema_ready, sema_gc_complete) {} - - void RunTheTest(Isolate* i_client_isolate) override { - Persistent weak; - volatile Address ptr_on_stack = - AllocateObjectAndGarbage(i_client_isolate, weak); - - ParkedScope parked_scope(i_client_isolate->main_thread_local_isolate()); - SignalReadyAndWait(i_client_isolate); - - // Make sure to keep the pointer alive. - EXPECT_NE(static_cast(0), ptr_on_stack); - - VerifyObject(weak); - } -}; - -// Test scenario #3: The thread parks, then unparks and waits, so it is forced -// into a safepoint. 
-class TestParkUnparkWaitThread final - : public TestStackContextWithSharedHeapThread { - public: - TestParkUnparkWaitThread(StackState stack_state, - v8::base::Semaphore* sema_ready, - v8::base::Semaphore* sema_gc_complete) - : TestStackContextWithSharedHeapThread("TestParkUnparkWaitThread", - stack_state, sema_ready, - sema_gc_complete) {} - - void RunTheTest(Isolate* i_client_isolate) override { - Persistent weak; - volatile Address ptr_on_stack = - AllocateObjectAndGarbage(i_client_isolate, weak); - - ParkedScope parked_scope(i_client_isolate->main_thread_local_isolate()); - - // Call KeepRunning, which is not inlined, to add a frame on the stack. - KeepRunning(i_client_isolate); - - // Make sure to keep the pointer alive. - EXPECT_NE(static_cast(0), ptr_on_stack); - - VerifyObject(weak); - } - - V8_NOINLINE void KeepRunning(Isolate* i_client_isolate) { - UnparkedScope unparked_scope(i_client_isolate->main_thread_local_isolate()); - - Persistent weak; - volatile Address ptr_on_stack = - AllocateObjectAndGarbage(i_client_isolate, weak); - - SignalReadyAndWait(i_client_isolate); - - // Make sure to keep the pointer alive. - EXPECT_NE(static_cast(0), ptr_on_stack); - - VerifyObject(weak); - } -}; - -// Test scenario #4: The thread parks, then unparks, then parks again and waits. -class TestParkUnparkParkWaitThread final - : public TestStackContextWithSharedHeapThread { - public: - TestParkUnparkParkWaitThread(StackState stack_state, - v8::base::Semaphore* sema_ready, - v8::base::Semaphore* sema_gc_complete) - : TestStackContextWithSharedHeapThread("TestParkUnparkParkWaitThread", - stack_state, sema_ready, - sema_gc_complete) {} - - void RunTheTest(Isolate* i_client_isolate) override { - Persistent weak; - volatile Address ptr_on_stack = - AllocateObjectAndGarbage(i_client_isolate, weak); - - ParkedScope parked_scope(i_client_isolate->main_thread_local_isolate()); - - // Call KeepRunning, which is not inlined, to add a frame on the stack. 
- KeepRunning(i_client_isolate); - - // Make sure to keep the pointer alive. - EXPECT_NE(static_cast(0), ptr_on_stack); - - VerifyObject(weak); - } - - V8_NOINLINE void KeepRunning(Isolate* i_client_isolate) { - UnparkedScope unparked_scope(i_client_isolate->main_thread_local_isolate()); - - Persistent weak; - volatile Address ptr_on_stack = - AllocateObjectAndGarbage(i_client_isolate, weak); - - // Call KeepRunningStill, which is not inlined, to add one more frame on the - // stack. - KeepRunningStill(i_client_isolate); - - // Make sure to keep the pointer alive. - EXPECT_NE(static_cast(0), ptr_on_stack); - - VerifyObject(weak); - } - - V8_NOINLINE void KeepRunningStill(Isolate* i_client_isolate) { - ParkedScope parked_scope(i_client_isolate->main_thread_local_isolate()); - SignalReadyAndWait(i_client_isolate); - } -}; - -// Test scenario #5: The thread parks, then unparks, parks again by unrolling -// the stack and waits. -class TestParkUnparkUnrollWaitThread final - : public TestStackContextWithSharedHeapThread { - public: - TestParkUnparkUnrollWaitThread(StackState stack_state, - v8::base::Semaphore* sema_ready, - v8::base::Semaphore* sema_gc_complete) - : TestStackContextWithSharedHeapThread("TestParkUnparkUnrollWaitThread", - stack_state, sema_ready, - sema_gc_complete) {} - - struct AllocationInfo { - Persistent* weak; - volatile Address* ptr = nullptr; - }; - - void RunTheTest(Isolate* i_client_isolate) override { - Persistent weak, weak0, weak1, weak2; - volatile Address ptr_on_stack = - AllocateObjectAndGarbage(i_client_isolate, weak); - - ParkedScope parked_scope(i_client_isolate->main_thread_local_isolate()); - - // Call KeepRunning, which is not inlined, to roll and then unroll the - // stack. - std::vector info = {{&weak0}, {&weak1}, {&weak2}}; - KeepRunning(i_client_isolate, info, 0); - - // Make sure to keep the pointer alive. 
- EXPECT_NE(static_cast(0), ptr_on_stack); - - VerifyObject(weak); - - // The object referenced by weak0 must be live with CSS, as it there was a - // pointer to it above the stack top. - DCHECK_LT(kPointerDepth0, kUnrollDepth); - VerifyObject(weak0); - - // The object referenced by weak1 may or may not be reclaimed with CSS, as - // there was a pointer to it above the last saved stacked context but below - // the stack top. It should always be reclaimed without CSS. - DCHECK_LT(kUnrollDepth, kPointerDepth1); - DCHECK_LT(kPointerDepth1, kUnparkDepth); - if (IsPreciseGC()) VerifyObject(weak1); - - // The object referenced by weak2 must be always reclaimed (modulo false - // positives for CSS), as the pointer to it was below the last saved stack - // context. - DCHECK_LT(kUnparkDepth, kPointerDepth2); - EXPECT_TRUE(weak2.IsEmpty()); - } - - static constexpr int kPointerDepth0 = 17; - static constexpr int kUnrollDepth = 42; - static constexpr int kPointerDepth1 = 57; - static constexpr int kUnparkDepth = 71; - static constexpr int kPointerDepth2 = 87; - static constexpr int kAllocationDepth = 100; - - V8_NOINLINE void KeepRunning(Isolate* i_client_isolate, - std::vector& info, int depth) { - // At three different recursion depths, store pointers to objects that will - // be allocated later. - if (depth == kPointerDepth0) { - volatile Address ptr_on_stack; - info[0].ptr = &ptr_on_stack; - KeepRunning(i_client_isolate, info, depth + 1); - // Make sure to keep the pointer alive. - EXPECT_NE(static_cast(0), ptr_on_stack); - return; - } - if (depth == kPointerDepth1) { - volatile Address ptr_on_stack; - info[1].ptr = &ptr_on_stack; - KeepRunning(i_client_isolate, info, depth + 1); - // Make sure to keep the pointer alive. - EXPECT_NE(static_cast(0), ptr_on_stack); - return; - } - if (depth == kPointerDepth2) { - volatile Address ptr_on_stack; - info[2].ptr = &ptr_on_stack; - KeepRunning(i_client_isolate, info, depth + 1); - // Make sure to keep the pointer alive. 
- EXPECT_NE(static_cast(0), ptr_on_stack); - return; - } - // At this depth, wait for GC when unrolling the stack. - if (depth == kUnrollDepth) { - KeepRunning(i_client_isolate, info, depth + 1); - SignalReadyAndWait(i_client_isolate); - return; - } - // At this depth, unpark when rolling and park again when unrolling. - if (depth == kUnparkDepth) { - UnparkedScope unparked_scope( - i_client_isolate->main_thread_local_isolate()); - KeepRunning(i_client_isolate, info, depth + 1); - return; - } - // Keep recursing until the end is reached. - if (depth < kAllocationDepth) { - KeepRunning(i_client_isolate, info, depth + 1); - return; - } - // The end of the recursion: allocate objects and store pointers at - // various recursion depths. - for (auto i : info) - *i.ptr = AllocateObjectAndGarbage(i_client_isolate, *i.weak); - } -}; - -} // namespace - -#define TEST_SCENARIO(name) \ - TEST_F(ConservativeStackScanningSharedTest, \ - StackContextWith##name##Precise) { \ - StackContextWithSharedHeapTest( \ - i_isolate(), StackState::kNoHeapPointers); \ - } \ - TEST_F(ConservativeStackScanningSharedTest, \ - StackContextWith##name##Conservative) { \ - StackContextWithSharedHeapTest( \ - i_isolate(), StackState::kMayContainHeapPointers); \ - } - -TEST_SCENARIO(Wait) -TEST_SCENARIO(ParkWait) -TEST_SCENARIO(ParkUnparkWait) -TEST_SCENARIO(ParkUnparkParkWait) -TEST_SCENARIO(ParkUnparkUnrollWait) - -#undef TEST_SCENARIO - -#endif // V8_CAN_CREATE_SHARED_HEAP_BOOL - } // namespace internal } // namespace v8 From f6d85958e08c0f4fd4083b740ce4059e8cf48064 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olivier=20Fl=C3=BCckiger?= Date: Wed, 14 Dec 2022 16:09:10 +0000 Subject: [PATCH 023/654] [static-roots] Use operator== in HeapObject::Is##Type MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace SafeEquals with normal equals operator in `IsUndefined` and friends. 
This will allow us to have more efficient checks with static roots, since pointers do not need to be decompressed. After this change calling Is##Type on CodeObjects is no longer possible. This is ensured by dchecks in operator==. The change might reveal more callers that need to be fixed. Bug: v8:13466 Change-Id: I3353d10aebb7a192a77281c44e4159f0da336297 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4106849 Reviewed-by: Camillo Bruni Commit-Queue: Camillo Bruni Reviewed-by: Dominik Inführ Auto-Submit: Olivier Flückiger Cr-Commit-Position: refs/heads/main@{#84974} --- include/v8-local-handle.h | 2 ++ src/objects/objects-inl.h | 2 +- src/profiler/sampling-heap-profiler.cc | 7 ++++++- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/include/v8-local-handle.h b/include/v8-local-handle.h index cbf87f949d..633c5633c5 100644 --- a/include/v8-local-handle.h +++ b/include/v8-local-handle.h @@ -53,6 +53,7 @@ class Utils; namespace internal { template class CustomArguments; +class SamplingHeapProfiler; } // namespace internal namespace api_internal { @@ -313,6 +314,7 @@ class Local { friend class BasicTracedReference; template friend class TracedReference; + friend class v8::internal::SamplingHeapProfiler; explicit V8_INLINE Local(T* that) : val_(that) {} V8_INLINE static Local New(Isolate* isolate, T* that) { diff --git a/src/objects/objects-inl.h b/src/objects/objects-inl.h index f2108a8a61..680e870981 100644 --- a/src/objects/objects-inl.h +++ b/src/objects/objects-inl.h @@ -110,7 +110,7 @@ IS_TYPE_FUNCTION_DEF(CodeT) return Is##Type(ReadOnlyRoots(isolate)); \ } \ bool Object::Is##Type(ReadOnlyRoots roots) const { \ - return SafeEquals(roots.Value()); \ + return (*this) == roots.Value(); \ } \ bool Object::Is##Type() const { \ return IsHeapObject() && HeapObject::cast(*this).Is##Type(); \ diff --git a/src/profiler/sampling-heap-profiler.cc b/src/profiler/sampling-heap-profiler.cc index 50a32dd4d5..6747f7bceb 100644 --- 
a/src/profiler/sampling-heap-profiler.cc +++ b/src/profiler/sampling-heap-profiler.cc @@ -81,7 +81,12 @@ void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) { HeapObject heap_object = HeapObject::FromAddress(soon_object); Handle obj(heap_object, isolate_); - Local loc = v8::Utils::ToLocal(obj); + // Since soon_object can be in code space we can't use v8::Utils::ToLocal. + DCHECK(obj.is_null() || + (obj->IsSmi() || + (V8_EXTERNAL_CODE_SPACE_BOOL && IsCodeSpaceObject(heap_object)) || + !obj->IsTheHole())); + Local loc(reinterpret_cast(obj.location())); AllocationNode* node = AddStack(); node->allocations_[size]++; From 0e0057a7d8083f9fa22d6e80a434b6b277ba2ac8 Mon Sep 17 00:00:00 2001 From: pthier Date: Wed, 21 Dec 2022 11:54:25 +0100 Subject: [PATCH 024/654] [maglev][arm64] Port CallBuiltin Drive-by: remove restriction to builtins without VarArgs. Bug: v8:7700 Change-Id: I7a18626bc94460a53f7f25aedf1e30e79b3162ee Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4118867 Commit-Queue: Patrick Thier Reviewed-by: Victor Gomes Cr-Commit-Position: refs/heads/main@{#84975} --- src/maglev/arm64/maglev-ir-arm64.cc | 1 - src/maglev/maglev-graph-builder.h | 2 - src/maglev/maglev-ir.cc | 100 +++++++++++++++++++++++++ src/maglev/maglev-ir.h | 12 ++- src/maglev/x64/maglev-ir-x64.cc | 112 ---------------------------- 5 files changed, 110 insertions(+), 117 deletions(-) diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index df023d184b..0e65001862 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -112,7 +112,6 @@ void Int32DecrementWithOverflow::GenerateCode(MaglevAssembler* masm, } UNIMPLEMENTED_NODE_WITH_CALL(Float64Ieee754Unary) -UNIMPLEMENTED_NODE_WITH_CALL(CallBuiltin) UNIMPLEMENTED_NODE_WITH_CALL(Construct) UNIMPLEMENTED_NODE_WITH_CALL(ConstructWithSpread) UNIMPLEMENTED_NODE_WITH_CALL(ConvertReceiver, mode_) diff --git a/src/maglev/maglev-graph-builder.h 
b/src/maglev/maglev-graph-builder.h index 10a9e369d4..7bc0565bde 100644 --- a/src/maglev/maglev-graph-builder.h +++ b/src/maglev/maglev-graph-builder.h @@ -493,8 +493,6 @@ class MaglevGraphBuilder { // TODO(victorgomes): Rename all kFeedbackVector parameters in the builtins // to kVector. DCHECK_EQ(vector_index, Descriptor::kVector); - // Also check that the builtin does not allow var args. - DCHECK_EQ(Descriptor::kAllowVarArgs, false); #endif // DEBUG return call_builtin; } diff --git a/src/maglev/maglev-ir.cc b/src/maglev/maglev-ir.cc index 50d1bddfd9..a9302545f6 100644 --- a/src/maglev/maglev-ir.cc +++ b/src/maglev/maglev-ir.cc @@ -1740,6 +1740,106 @@ void CallKnownJSFunction::GenerateCode(MaglevAssembler* masm, masm->DefineExceptionHandlerAndLazyDeoptPoint(this); } +int CallBuiltin::MaxCallStackArgs() const { + auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin()); + if (!descriptor.AllowVarArgs()) { + return descriptor.GetStackParameterCount(); + } else { + int all_input_count = InputCountWithoutContext() + (has_feedback() ? 2 : 0); + DCHECK_GE(all_input_count, descriptor.GetRegisterParameterCount()); + return all_input_count - descriptor.GetRegisterParameterCount(); + } +} + +void CallBuiltin::SetValueLocationConstraints() { + auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin()); + bool has_context = descriptor.HasContextParameter(); + int i = 0; + for (; i < InputsInRegisterCount(); i++) { + UseFixed(input(i), descriptor.GetRegisterParameter(i)); + } + for (; i < InputCountWithoutContext(); i++) { + UseAny(input(i)); + } + if (has_context) { + UseFixed(input(i), kContextRegister); + } + DefineAsFixed(this, kReturnRegister0); +} + +template +void CallBuiltin::PushArguments(MaglevAssembler* masm, Args... extra_args) { + auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin()); + if (descriptor.GetStackArgumentOrder() == StackArgumentOrder::kDefault) { + // In Default order we cannot have extra args (feedback). 
+ DCHECK_EQ(sizeof...(extra_args), 0); + __ Push(base::make_iterator_range(stack_args_begin(), stack_args_end())); + } else { + DCHECK_EQ(descriptor.GetStackArgumentOrder(), StackArgumentOrder::kJS); + __ PushReverse(extra_args..., base::make_iterator_range(stack_args_begin(), + stack_args_end())); + } +} + +void CallBuiltin::PassFeedbackSlotInRegister(MaglevAssembler* masm) { + DCHECK(has_feedback()); + auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin()); + int slot_index = InputCountWithoutContext(); + switch (slot_type()) { + case kTaggedIndex: + __ Move(descriptor.GetRegisterParameter(slot_index), + TaggedIndex::FromIntptr(feedback().index())); + break; + case kSmi: + __ Move(descriptor.GetRegisterParameter(slot_index), + Smi::FromInt(feedback().index())); + break; + } +} + +void CallBuiltin::PushFeedbackAndArguments(MaglevAssembler* masm) { + DCHECK(has_feedback()); + + auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin()); + int slot_index = InputCountWithoutContext(); + int vector_index = slot_index + 1; + + // There are three possibilities: + // 1. Feedback slot and vector are in register. + // 2. Feedback slot is in register and vector is on stack. + // 3. Feedback slot and vector are on stack. 
+ if (vector_index < descriptor.GetRegisterParameterCount()) { + PassFeedbackSlotInRegister(masm); + __ Move(descriptor.GetRegisterParameter(vector_index), feedback().vector); + PushArguments(masm); + } else if (vector_index == descriptor.GetRegisterParameterCount()) { + PassFeedbackSlotInRegister(masm); + PushArguments(masm, feedback().vector); + } else { + int slot = feedback().index(); + Handle vector = feedback().vector; + switch (slot_type()) { + case kTaggedIndex: + PushArguments(masm, TaggedIndex::FromIntptr(slot), vector); + break; + case kSmi: + PushArguments(masm, Smi::FromInt(slot), vector); + break; + } + } +} + +void CallBuiltin::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + if (has_feedback()) { + PushFeedbackAndArguments(masm); + } else { + PushArguments(masm); + } + __ CallBuiltin(builtin()); + masm->DefineExceptionHandlerAndLazyDeoptPoint(this); +} + int CallRuntime::MaxCallStackArgs() const { return num_args(); } void CallRuntime::SetValueLocationConstraints() { UseFixed(context(), kContextRegister); diff --git a/src/maglev/maglev-ir.h b/src/maglev/maglev-ir.h index b697154dff..96177c4bb2 100644 --- a/src/maglev/maglev-ir.h +++ b/src/maglev/maglev-ir.h @@ -5043,6 +5043,13 @@ class CallBuiltin : public ValueNodeT { return descriptor.GetRegisterParameterCount(); } + auto stack_args_begin() { + return std::make_reverse_iterator(&input(InputsInRegisterCount() - 1)); + } + auto stack_args_end() { + return std::make_reverse_iterator(&input(InputCountWithoutContext() - 1)); + } + void set_arg(int i, ValueNode* node) { set_input(i, node); } int ReturnCount() const { @@ -5056,9 +5063,10 @@ class CallBuiltin : public ValueNodeT { void PrintParams(std::ostream&, MaglevGraphLabeller*) const; private: - void PassFeedbackSlotOnStack(MaglevAssembler*); + template + void PushArguments(MaglevAssembler* masm, Args... 
extra_args); void PassFeedbackSlotInRegister(MaglevAssembler*); - void PushFeedback(MaglevAssembler*); + void PushFeedbackAndArguments(MaglevAssembler*); Builtin builtin_; base::Optional feedback_; diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index b0a08bf6b0..b0b0975ac2 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -2589,118 +2589,6 @@ void Construct::GenerateCode(MaglevAssembler* masm, masm->DefineExceptionHandlerAndLazyDeoptPoint(this); } -int CallBuiltin::MaxCallStackArgs() const { - auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin()); - if (!descriptor.AllowVarArgs()) { - return descriptor.GetStackParameterCount(); - } else { - int all_input_count = InputCountWithoutContext() + (has_feedback() ? 2 : 0); - DCHECK_GE(all_input_count, descriptor.GetRegisterParameterCount()); - return all_input_count - descriptor.GetRegisterParameterCount(); - } -} -void CallBuiltin::SetValueLocationConstraints() { - auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin()); - bool has_context = descriptor.HasContextParameter(); - int i = 0; - for (; i < InputsInRegisterCount(); i++) { - UseFixed(input(i), descriptor.GetRegisterParameter(i)); - } - for (; i < InputCountWithoutContext(); i++) { - UseAny(input(i)); - } - if (has_context) { - UseFixed(input(i), kContextRegister); - } - DefineAsFixed(this, kReturnRegister0); -} - -void CallBuiltin::PassFeedbackSlotOnStack(MaglevAssembler* masm) { - DCHECK(has_feedback()); - switch (slot_type()) { - case kTaggedIndex: - __ Push(TaggedIndex::FromIntptr(feedback().index())); - break; - case kSmi: - __ Push(Smi::FromInt(feedback().index())); - break; - } -} - -void CallBuiltin::PassFeedbackSlotInRegister(MaglevAssembler* masm) { - DCHECK(has_feedback()); - auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin()); - int slot_index = InputCountWithoutContext(); - switch (slot_type()) { - case kTaggedIndex: - __ 
Move(descriptor.GetRegisterParameter(slot_index), - TaggedIndex::FromIntptr(feedback().index())); - break; - case kSmi: - __ Move(descriptor.GetRegisterParameter(slot_index), - Smi::FromInt(feedback().index())); - break; - } -} - -void CallBuiltin::PushFeedback(MaglevAssembler* masm) { - DCHECK(has_feedback()); - - auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin()); - int slot_index = InputCountWithoutContext(); - int vector_index = slot_index + 1; - - // There are three possibilities: - // 1. Feedback slot and vector are in register. - // 2. Feedback slot is in register and vector is on stack. - // 3. Feedback slot and vector are on stack. - if (vector_index < descriptor.GetRegisterParameterCount()) { - PassFeedbackSlotInRegister(masm); - __ Move(descriptor.GetRegisterParameter(vector_index), feedback().vector); - } else if (vector_index == descriptor.GetRegisterParameterCount()) { - PassFeedbackSlotInRegister(masm); - // We do not allow var args if has_feedback(), so here we have only one - // parameter on stack and do not need to check stack arguments order. - __ Push(feedback().vector); - } else { - // Same as above. We does not allow var args if has_feedback(), so feedback - // slot and vector must be last two inputs. 
- if (descriptor.GetStackArgumentOrder() == StackArgumentOrder::kDefault) { - PassFeedbackSlotOnStack(masm); - __ Push(feedback().vector); - } else { - DCHECK_EQ(descriptor.GetStackArgumentOrder(), StackArgumentOrder::kJS); - __ Push(feedback().vector); - PassFeedbackSlotOnStack(masm); - } - } -} - -void CallBuiltin::GenerateCode(MaglevAssembler* masm, - const ProcessingState& state) { - auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin()); - - if (descriptor.GetStackArgumentOrder() == StackArgumentOrder::kDefault) { - for (int i = InputsInRegisterCount(); i < InputCountWithoutContext(); ++i) { - __ Push(input(i)); - } - if (has_feedback()) { - PushFeedback(masm); - } - } else { - DCHECK_EQ(descriptor.GetStackArgumentOrder(), StackArgumentOrder::kJS); - if (has_feedback()) { - PushFeedback(masm); - } - for (int i = InputCountWithoutContext() - 1; i >= InputsInRegisterCount(); - --i) { - __ Push(input(i)); - } - } - __ CallBuiltin(builtin()); - masm->DefineExceptionHandlerAndLazyDeoptPoint(this); -} - int ConstructWithSpread::MaxCallStackArgs() const { int argc_no_spread = num_args() - 1; using D = CallInterfaceDescriptorFor< From 98697660979ab8396583937fbf484fcd038bd87c Mon Sep 17 00:00:00 2001 From: Victor Gomes Date: Wed, 21 Dec 2022 14:00:45 +0100 Subject: [PATCH 025/654] [maglev][arm64] Add BranchIfReceiver + BranchIfUndefOrNull Bug: v8:7700 Change-Id: I5b80062b463b07a9354909638415d834e128ec55 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4111529 Commit-Queue: Patrick Thier Auto-Submit: Victor Gomes Reviewed-by: Patrick Thier Commit-Queue: Victor Gomes Cr-Commit-Position: refs/heads/main@{#84976} --- src/maglev/arm64/maglev-ir-arm64.cc | 16 ++++++++++++++-- src/maglev/maglev-ir.cc | 14 ++++++++++++++ src/maglev/x64/maglev-ir-x64.cc | 14 -------------- 3 files changed, 28 insertions(+), 16 deletions(-) diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index 0e65001862..46447e903e 100644 --- 
a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -148,8 +148,6 @@ UNIMPLEMENTED_NODE_WITH_CALL(ThrowReferenceErrorIfHole) UNIMPLEMENTED_NODE_WITH_CALL(ThrowSuperNotCalledIfHole) UNIMPLEMENTED_NODE_WITH_CALL(ThrowSuperAlreadyCalledIfNotHole) UNIMPLEMENTED_NODE_WITH_CALL(ThrowIfNotSuperConstructor) -UNIMPLEMENTED_NODE(BranchIfUndefinedOrNull) -UNIMPLEMENTED_NODE(BranchIfJSReceiver) UNIMPLEMENTED_NODE(Switch) int BuiltinStringFromCharCode::MaxCallStackArgs() const { @@ -1394,6 +1392,20 @@ void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { __ Ret(); } +void BranchIfJSReceiver::SetValueLocationConstraints() { + UseRegister(condition_input()); +} +void BranchIfJSReceiver::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + Register value = ToRegister(condition_input()); + __ JumpIfSmi(value, if_false()->label()); + __ LoadMap(scratch, value); + __ CompareInstanceType(scratch, scratch, FIRST_JS_RECEIVER_TYPE); + __ Branch(hs, if_true(), if_false(), state.next_block()); +} + void BranchIfFloat64Compare::SetValueLocationConstraints() { UseRegister(left_input()); UseRegister(right_input()); diff --git a/src/maglev/maglev-ir.cc b/src/maglev/maglev-ir.cc index a9302545f6..52c6e06d36 100644 --- a/src/maglev/maglev-ir.cc +++ b/src/maglev/maglev-ir.cc @@ -2038,6 +2038,20 @@ void BranchIfInt32Compare::GenerateCode(MaglevAssembler* masm, state.next_block()); } +void BranchIfUndefinedOrNull::SetValueLocationConstraints() { + UseRegister(condition_input()); +} +void BranchIfUndefinedOrNull::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register value = ToRegister(condition_input()); + __ JumpIfRoot(value, RootIndex::kUndefinedValue, if_true()->label()); + __ JumpIfRoot(value, RootIndex::kNullValue, if_true()->label()); + auto* next_block = state.next_block(); + if (if_false() != next_block) { + __ 
Jump(if_false()->label()); + } +} + // --- // Print params // --- diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index b0b0975ac2..ca0cfeeae4 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -2980,20 +2980,6 @@ void JumpLoopPrologue::GenerateCode(MaglevAssembler* masm, __ bind(*no_code_for_osr); } -void BranchIfUndefinedOrNull::SetValueLocationConstraints() { - UseRegister(condition_input()); -} -void BranchIfUndefinedOrNull::GenerateCode(MaglevAssembler* masm, - const ProcessingState& state) { - Register value = ToRegister(condition_input()); - __ JumpIfRoot(value, RootIndex::kUndefinedValue, if_true()->label()); - __ JumpIfRoot(value, RootIndex::kNullValue, if_true()->label()); - auto* next_block = state.next_block(); - if (if_false() != next_block) { - __ jmp(if_false()->label()); - } -} - void BranchIfJSReceiver::SetValueLocationConstraints() { UseRegister(condition_input()); } From 3a232e7ab83c567480db04926d53231ea4e69393 Mon Sep 17 00:00:00 2001 From: Manos Koukoutos Date: Wed, 21 Dec 2022 14:18:03 +0000 Subject: [PATCH 026/654] Revert "[maglev] Destroy jobs on BG thread" This reverts commit 8623fd473b8742eec2d6780db72b982130b55da4. Reason for revert: https://ci.chromium.org/ui/p/v8/builders/ci/V8%20Linux64%20TSAN%20-%20no-concurrent-marking/12508/overview Original change's description: > [maglev] Destroy jobs on BG thread > > Maglev job destruction is suprisingly expensive, taking up roughly a > third of total finalization time. 
Rather than destroying jobs as part of > finalization, re-post them to the concurrent dispatcher to be destroyed > in the background > > Bug: v8:7700 > Change-Id: I450d8a7b49737504c2aaebbfa7754e0ae15e7f05 > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4111238 > Reviewed-by: Victor Gomes > Auto-Submit: Leszek Swirski > Commit-Queue: Leszek Swirski > Cr-Commit-Position: refs/heads/main@{#84970} Bug: v8:7700 Change-Id: I797a34529652c814b11cd13309b2d3a5ff68266e No-Presubmit: true No-Tree-Checks: true No-Try: true Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4115747 Bot-Commit: Rubber Stamper Owners-Override: Manos Koukoutos Commit-Queue: Manos Koukoutos Cr-Commit-Position: refs/heads/main@{#84977} --- src/maglev/maglev-concurrent-dispatcher.cc | 20 ++------------------ src/maglev/maglev-concurrent-dispatcher.h | 1 - 2 files changed, 2 insertions(+), 19 deletions(-) diff --git a/src/maglev/maglev-concurrent-dispatcher.cc b/src/maglev/maglev-concurrent-dispatcher.cc index aa5acf7a99..a098505e4d 100644 --- a/src/maglev/maglev-concurrent-dispatcher.cc +++ b/src/maglev/maglev-concurrent-dispatcher.cc @@ -158,7 +158,6 @@ class MaglevConcurrentDispatcher::JobTask final : public v8::JobTask { LocalIsolate local_isolate(isolate(), ThreadKind::kBackground); DCHECK(local_isolate.heap()->IsParked()); - bool job_was_executed = false; while (!incoming_queue()->IsEmpty() && !delegate->ShouldYield()) { std::unique_ptr job; if (!incoming_queue()->Dequeue(&job)) break; @@ -169,32 +168,20 @@ class MaglevConcurrentDispatcher::JobTask final : public v8::JobTask { RuntimeCallStats* rcs = nullptr; // TODO(v8:7700): Implement. 
CompilationJob::Status status = job->ExecuteJob(rcs, &local_isolate); if (status == CompilationJob::SUCCEEDED) { - job_was_executed = true; outgoing_queue()->Enqueue(std::move(job)); } } - if (job_was_executed) { - isolate()->stack_guard()->RequestInstallMaglevCode(); - } - // Maglev jobs aren't cheap to destruct, so destroy them here in the - // background thread rather than on the main thread. - while (!destruction_queue()->IsEmpty() && !delegate->ShouldYield()) { - std::unique_ptr job; - if (!destruction_queue()->Dequeue(&job)) break; - DCHECK_NOT_NULL(job); - job.reset(); - } + isolate()->stack_guard()->RequestInstallMaglevCode(); } size_t GetMaxConcurrency(size_t) const override { - return incoming_queue()->size() + destruction_queue()->size(); + return incoming_queue()->size(); } private: Isolate* isolate() const { return dispatcher_->isolate_; } QueueT* incoming_queue() const { return &dispatcher_->incoming_queue_; } QueueT* outgoing_queue() const { return &dispatcher_->outgoing_queue_; } - QueueT* destruction_queue() const { return &dispatcher_->destruction_queue_; } MaglevConcurrentDispatcher* const dispatcher_; const Handle function_; @@ -237,9 +224,6 @@ void MaglevConcurrentDispatcher::FinalizeFinishedJobs() { "V8.MaglevConcurrentFinalize", job.get(), TRACE_EVENT_FLAG_FLOW_IN); Compiler::FinalizeMaglevCompilationJob(job.get(), isolate_); - // Maglev jobs aren't cheap to destruct, so re-enqueue them for destruction - // on a background thread. 
- destruction_queue_.Enqueue(std::move(job)); } } diff --git a/src/maglev/maglev-concurrent-dispatcher.h b/src/maglev/maglev-concurrent-dispatcher.h index 20a355f211..28343f5dd5 100644 --- a/src/maglev/maglev-concurrent-dispatcher.h +++ b/src/maglev/maglev-concurrent-dispatcher.h @@ -100,7 +100,6 @@ class MaglevConcurrentDispatcher final { std::unique_ptr job_handle_; QueueT incoming_queue_; QueueT outgoing_queue_; - QueueT destruction_queue_; }; } // namespace maglev From da4fa98a7d11f4f37e591b4c0a90b7bd17ba0b71 Mon Sep 17 00:00:00 2001 From: Victor Gomes Date: Wed, 21 Dec 2022 15:06:42 +0100 Subject: [PATCH 027/654] [maglev][arm64] Add ThrowIf*** IRs Bug: v8:7700 Change-Id: I7229a10e5b7d0236a90a4d0a3456e1730fe35095 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4120066 Auto-Submit: Victor Gomes Commit-Queue: Victor Gomes Reviewed-by: Patrick Thier Cr-Commit-Position: refs/heads/main@{#84978} --- src/maglev/arm64/maglev-assembler-arm64-inl.h | 14 ++++ src/maglev/arm64/maglev-ir-arm64.cc | 28 ++++++-- src/maglev/maglev-assembler.h | 2 + src/maglev/maglev-ir.cc | 52 ++++++++++++++ src/maglev/x64/maglev-assembler-x64-inl.h | 11 +++ src/maglev/x64/maglev-ir-x64.cc | 70 ------------------- 6 files changed, 103 insertions(+), 74 deletions(-) diff --git a/src/maglev/arm64/maglev-assembler-arm64-inl.h b/src/maglev/arm64/maglev-assembler-arm64-inl.h index 770d5df814..3dedb86892 100644 --- a/src/maglev/arm64/maglev-assembler-arm64-inl.h +++ b/src/maglev/arm64/maglev-assembler-arm64-inl.h @@ -241,6 +241,20 @@ void MaglevAssembler::PushReverse(T... 
vals) { detail::PushAllReverse(this, vals...); } +inline Condition MaglevAssembler::IsRootConstant(Input input, + RootIndex root_index) { + if (input.operand().IsRegister()) { + CompareRoot(ToRegister(input), root_index); + } else { + DCHECK(input.operand().IsStackSlot()); + UseScratchRegisterScope temps(this); + Register scratch = temps.AcquireX(); + Ldr(scratch, ToMemOperand(input)); + CompareRoot(scratch, root_index); + } + return eq; +} + void MaglevAssembler::Branch(Condition condition, BasicBlock* if_true, BasicBlock* if_false, BasicBlock* next_block) { // We don't have any branch probability information, so try to jump diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index 46447e903e..a40c2e9611 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -144,10 +144,6 @@ UNIMPLEMENTED_NODE(StoreSignedIntDataViewElement, type_) UNIMPLEMENTED_NODE(StoreDoubleDataViewElement) UNIMPLEMENTED_NODE(StoreTaggedFieldNoWriteBarrier) UNIMPLEMENTED_NODE_WITH_CALL(StoreTaggedFieldWithWriteBarrier) -UNIMPLEMENTED_NODE_WITH_CALL(ThrowReferenceErrorIfHole) -UNIMPLEMENTED_NODE_WITH_CALL(ThrowSuperNotCalledIfHole) -UNIMPLEMENTED_NODE_WITH_CALL(ThrowSuperAlreadyCalledIfNotHole) -UNIMPLEMENTED_NODE_WITH_CALL(ThrowIfNotSuperConstructor) UNIMPLEMENTED_NODE(Switch) int BuiltinStringFromCharCode::MaxCallStackArgs() const { @@ -1349,6 +1345,30 @@ void StringLength::GenerateCode(MaglevAssembler* masm, FieldMemOperand(object, String::kLengthOffset)); } +int ThrowIfNotSuperConstructor::MaxCallStackArgs() const { return 2; } +void ThrowIfNotSuperConstructor::SetValueLocationConstraints() { + UseRegister(constructor()); + UseRegister(function()); +} +void ThrowIfNotSuperConstructor::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + DeferredCodeInfo* deferred_abort = __ PushDeferredCode( + [](MaglevAssembler* masm, ThrowIfNotSuperConstructor* node) { + __ Push(ToRegister(node->constructor()), 
ToRegister(node->function())); + __ Move(kContextRegister, masm->native_context().object()); + __ CallRuntime(Runtime::kThrowNotSuperConstructor, 2); + masm->DefineExceptionHandlerAndLazyDeoptPoint(node); + __ Abort(AbortReason::kUnexpectedReturnFromThrow); + }, + this); + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + __ LoadMap(scratch, ToRegister(constructor())); + __ Ldr(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); + __ TestAndBranchIfAllClear(scratch, Map::Bits1::IsConstructorBit::kMask, + &deferred_abort->deferred_code_label); +} + // --- // Control nodes // --- diff --git a/src/maglev/maglev-assembler.h b/src/maglev/maglev-assembler.h index 2f678286e5..cf977b8815 100644 --- a/src/maglev/maglev-assembler.h +++ b/src/maglev/maglev-assembler.h @@ -78,6 +78,8 @@ class MaglevAssembler : public MacroAssembler { void LoadSingleCharacterString(Register result, Register char_code, Register scratch); + inline Condition IsRootConstant(Input input, RootIndex root_index); + inline void Branch(Condition condition, BasicBlock* if_true, BasicBlock* if_false, BasicBlock* next_block); inline Register FromAnyToRegister(const Input& input, Register scratch); diff --git a/src/maglev/maglev-ir.cc b/src/maglev/maglev-ir.cc index 52c6e06d36..6c5385dad7 100644 --- a/src/maglev/maglev-ir.cc +++ b/src/maglev/maglev-ir.cc @@ -1583,6 +1583,58 @@ void ToNumberOrNumeric::GenerateCode(MaglevAssembler* masm, masm->DefineExceptionHandlerAndLazyDeoptPoint(this); } +int ThrowReferenceErrorIfHole::MaxCallStackArgs() const { return 1; } +void ThrowReferenceErrorIfHole::SetValueLocationConstraints() { + UseAny(value()); +} +void ThrowReferenceErrorIfHole::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + __ JumpToDeferredIf( + __ IsRootConstant(value(), RootIndex::kTheHoleValue), + [](MaglevAssembler* masm, ThrowReferenceErrorIfHole* node) { + __ Move(kContextRegister, masm->native_context().object()); + __ 
Push(node->name().object()); + __ CallRuntime(Runtime::kThrowAccessedUninitializedVariable, 1); + masm->DefineExceptionHandlerAndLazyDeoptPoint(node); + __ Abort(AbortReason::kUnexpectedReturnFromThrow); + }, + this); +} + +int ThrowSuperNotCalledIfHole::MaxCallStackArgs() const { return 0; } +void ThrowSuperNotCalledIfHole::SetValueLocationConstraints() { + UseAny(value()); +} +void ThrowSuperNotCalledIfHole::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + __ JumpToDeferredIf( + __ IsRootConstant(value(), RootIndex::kTheHoleValue), + [](MaglevAssembler* masm, ThrowSuperNotCalledIfHole* node) { + __ Move(kContextRegister, masm->native_context().object()); + __ CallRuntime(Runtime::kThrowSuperNotCalled, 0); + masm->DefineExceptionHandlerAndLazyDeoptPoint(node); + __ Abort(AbortReason::kUnexpectedReturnFromThrow); + }, + this); +} + +int ThrowSuperAlreadyCalledIfNotHole::MaxCallStackArgs() const { return 0; } +void ThrowSuperAlreadyCalledIfNotHole::SetValueLocationConstraints() { + UseAny(value()); +} +void ThrowSuperAlreadyCalledIfNotHole::GenerateCode( + MaglevAssembler* masm, const ProcessingState& state) { + __ JumpToDeferredIf( + NegateCondition(__ IsRootConstant(value(), RootIndex::kTheHoleValue)), + [](MaglevAssembler* masm, ThrowSuperAlreadyCalledIfNotHole* node) { + __ Move(kContextRegister, masm->native_context().object()); + __ CallRuntime(Runtime::kThrowSuperAlreadyCalledError, 0); + masm->DefineExceptionHandlerAndLazyDeoptPoint(node); + __ Abort(AbortReason::kUnexpectedReturnFromThrow); + }, + this); +} + void TruncateUint32ToInt32::SetValueLocationConstraints() { UseRegister(input()); DefineSameAsFirst(this); diff --git a/src/maglev/x64/maglev-assembler-x64-inl.h b/src/maglev/x64/maglev-assembler-x64-inl.h index 8c8195b543..27cd4ebf32 100644 --- a/src/maglev/x64/maglev-assembler-x64-inl.h +++ b/src/maglev/x64/maglev-assembler-x64-inl.h @@ -129,6 +129,17 @@ void MaglevAssembler::PushReverse(T... 
vals) { detail::PushAllHelper::PushReverse(this, vals...); } +inline Condition MaglevAssembler::IsRootConstant(Input input, + RootIndex root_index) { + if (input.operand().IsRegister()) { + CompareRoot(ToRegister(input), root_index); + } else { + DCHECK(input.operand().IsStackSlot()); + CompareRoot(ToMemOperand(input), root_index); + } + return equal; +} + void MaglevAssembler::Branch(Condition condition, BasicBlock* if_true, BasicBlock* if_false, BasicBlock* next_block) { // We don't have any branch probability information, so try to jump diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index ca0cfeeae4..0f11cfaec2 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -2717,76 +2717,6 @@ void ReduceInterruptBudget::GenerateCode(MaglevAssembler* masm, __ bind(*done); } -int ThrowReferenceErrorIfHole::MaxCallStackArgs() const { return 1; } -void ThrowReferenceErrorIfHole::SetValueLocationConstraints() { - UseAny(value()); -} -void ThrowReferenceErrorIfHole::GenerateCode(MaglevAssembler* masm, - const ProcessingState& state) { - if (value().operand().IsRegister()) { - __ CompareRoot(ToRegister(value()), RootIndex::kTheHoleValue); - } else { - DCHECK(value().operand().IsStackSlot()); - __ CompareRoot(masm->ToMemOperand(value()), RootIndex::kTheHoleValue); - } - __ JumpToDeferredIf( - equal, - [](MaglevAssembler* masm, ThrowReferenceErrorIfHole* node) { - __ Move(kContextRegister, masm->native_context().object()); - __ Push(node->name().object()); - __ CallRuntime(Runtime::kThrowAccessedUninitializedVariable, 1); - masm->DefineExceptionHandlerAndLazyDeoptPoint(node); - __ Abort(AbortReason::kUnexpectedReturnFromThrow); - }, - this); -} - -int ThrowSuperNotCalledIfHole::MaxCallStackArgs() const { return 0; } -void ThrowSuperNotCalledIfHole::SetValueLocationConstraints() { - UseAny(value()); -} -void ThrowSuperNotCalledIfHole::GenerateCode(MaglevAssembler* masm, - const ProcessingState& state) { - if 
(value().operand().IsRegister()) { - __ CompareRoot(ToRegister(value()), RootIndex::kTheHoleValue); - } else { - DCHECK(value().operand().IsStackSlot()); - __ CompareRoot(masm->ToMemOperand(value()), RootIndex::kTheHoleValue); - } - __ JumpToDeferredIf( - equal, - [](MaglevAssembler* masm, ThrowSuperNotCalledIfHole* node) { - __ Move(kContextRegister, masm->native_context().object()); - __ CallRuntime(Runtime::kThrowSuperNotCalled, 0); - masm->DefineExceptionHandlerAndLazyDeoptPoint(node); - __ Abort(AbortReason::kUnexpectedReturnFromThrow); - }, - this); -} - -int ThrowSuperAlreadyCalledIfNotHole::MaxCallStackArgs() const { return 0; } -void ThrowSuperAlreadyCalledIfNotHole::SetValueLocationConstraints() { - UseAny(value()); -} -void ThrowSuperAlreadyCalledIfNotHole::GenerateCode( - MaglevAssembler* masm, const ProcessingState& state) { - if (value().operand().IsRegister()) { - __ CompareRoot(ToRegister(value()), RootIndex::kTheHoleValue); - } else { - DCHECK(value().operand().IsStackSlot()); - __ CompareRoot(masm->ToMemOperand(value()), RootIndex::kTheHoleValue); - } - __ JumpToDeferredIf( - not_equal, - [](MaglevAssembler* masm, ThrowSuperAlreadyCalledIfNotHole* node) { - __ Move(kContextRegister, masm->native_context().object()); - __ CallRuntime(Runtime::kThrowSuperAlreadyCalledError, 0); - masm->DefineExceptionHandlerAndLazyDeoptPoint(node); - __ Abort(AbortReason::kUnexpectedReturnFromThrow); - }, - this); -} - int ThrowIfNotSuperConstructor::MaxCallStackArgs() const { return 2; } void ThrowIfNotSuperConstructor::SetValueLocationConstraints() { UseRegister(constructor()); From dc950c32bd5262d66d845d2bfeb1ff4a17a857bc Mon Sep 17 00:00:00 2001 From: Victor Gomes Date: Wed, 21 Dec 2022 17:31:45 +0100 Subject: [PATCH 028/654] [maglev][arm64] Add Switch and Generator IRs Bug: v8:7700 Change-Id: I63012676d85a97a06b2869666d17b9410e7ff71d Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4118773 Auto-Submit: Victor Gomes Reviewed-by: Patrick Thier 
Commit-Queue: Victor Gomes Commit-Queue: Patrick Thier Cr-Commit-Position: refs/heads/main@{#84979} --- src/codegen/arm64/macro-assembler-arm64.cc | 22 +++ src/codegen/arm64/macro-assembler-arm64.h | 9 ++ src/maglev/arm64/maglev-assembler-arm64-inl.h | 8 ++ src/maglev/arm64/maglev-ir-arm64.cc | 125 +++++++++++++++++- src/maglev/maglev-assembler.cc | 17 +++ src/maglev/maglev-assembler.h | 4 +- src/maglev/maglev-basic-block.h | 8 ++ src/maglev/maglev-code-generator.cc | 3 +- src/maglev/maglev-ir.cc | 65 +++++++++ src/maglev/x64/maglev-assembler-x64-inl.h | 21 +-- src/maglev/x64/maglev-ir-x64.cc | 58 -------- 11 files changed, 258 insertions(+), 82 deletions(-) diff --git a/src/codegen/arm64/macro-assembler-arm64.cc b/src/codegen/arm64/macro-assembler-arm64.cc index 4c5894ef5e..91f43d69ac 100644 --- a/src/codegen/arm64/macro-assembler-arm64.cc +++ b/src/codegen/arm64/macro-assembler-arm64.cc @@ -1112,6 +1112,28 @@ void TurboAssembler::Abs(const Register& rd, const Register& rm, } } +void TurboAssembler::Switch(Register scratch, Register value, + int case_value_base, Label** labels, + int num_labels) { + Register table = scratch; + Label fallthrough, jump_table; + if (case_value_base != 0) { + Sub(value, value, case_value_base); + } + Cmp(value, Immediate(num_labels)); + B(&fallthrough, hs); + Adr(table, &jump_table); + Ldr(table, MemOperand(table, value, LSL, kSystemPointerSizeLog2)); + Br(table); + // Emit the jump table inline, under the assumption that it's not too big. + Align(kSystemPointerSize); + bind(&jump_table); + for (int i = 0; i < num_labels; ++i) { + dcptr(labels[i]); + } + bind(&fallthrough); +} + // Abstracted stack operations. 
void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1, diff --git a/src/codegen/arm64/macro-assembler-arm64.h b/src/codegen/arm64/macro-assembler-arm64.h index bc848778eb..a4c7f390f7 100644 --- a/src/codegen/arm64/macro-assembler-arm64.h +++ b/src/codegen/arm64/macro-assembler-arm64.h @@ -791,6 +791,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { LS_MACRO_LIST(DECLARE_FUNCTION) #undef DECLARE_FUNCTION + void Switch(Register scratch, Register value, int case_value_base, + Label** labels, int num_labels); + // Push or pop up to 4 registers of the same width to or from the stack. // // If an argument register is 'NoReg', all further arguments are also assumed @@ -1416,6 +1419,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void StoreTaggedField(const Register& value, const MemOperand& dst_field_operand); + // For compatibility with platform-independent code. + void StoreTaggedField(const MemOperand& dst_field_operand, + const Register& value) { + StoreTaggedField(value, dst_field_operand); + } + void AtomicStoreTaggedField(const Register& value, const Register& dst_base, const Register& dst_index, const Register& temp); diff --git a/src/maglev/arm64/maglev-assembler-arm64-inl.h b/src/maglev/arm64/maglev-assembler-arm64-inl.h index 3dedb86892..2b520431b6 100644 --- a/src/maglev/arm64/maglev-assembler-arm64-inl.h +++ b/src/maglev/arm64/maglev-assembler-arm64-inl.h @@ -241,6 +241,14 @@ void MaglevAssembler::PushReverse(T... 
vals) { detail::PushAllReverse(this, vals...); } +inline void MaglevAssembler::BindBlock(BasicBlock* block) { + if (block->is_start_block_of_switch_case()) { + BindJumpTarget(block->label()); + } else { + Bind(block->label()); + } +} + inline Condition MaglevAssembler::IsRootConstant(Input input, RootIndex root_index) { if (input.operand().IsRegister()) { diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index a40c2e9611..41019bb727 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -115,7 +115,6 @@ UNIMPLEMENTED_NODE_WITH_CALL(Float64Ieee754Unary) UNIMPLEMENTED_NODE_WITH_CALL(Construct) UNIMPLEMENTED_NODE_WITH_CALL(ConstructWithSpread) UNIMPLEMENTED_NODE_WITH_CALL(ConvertReceiver, mode_) -UNIMPLEMENTED_NODE(GeneratorRestoreRegister) UNIMPLEMENTED_NODE(LoadSignedIntDataViewElement, type_) UNIMPLEMENTED_NODE(LoadDoubleDataViewElement) UNIMPLEMENTED_NODE(LoadSignedIntTypedArrayElement, elements_kind_) @@ -136,15 +135,12 @@ UNIMPLEMENTED_NODE(CheckJSObjectElementsBounds) UNIMPLEMENTED_NODE(CheckJSTypedArrayBounds, elements_kind_) UNIMPLEMENTED_NODE(CheckMaps, check_type_) UNIMPLEMENTED_NODE_WITH_CALL(CheckMapsWithMigration, check_type_) -UNIMPLEMENTED_NODE_WITH_CALL(GeneratorStore) UNIMPLEMENTED_NODE_WITH_CALL(JumpLoopPrologue, loop_depth_, unit_) UNIMPLEMENTED_NODE_WITH_CALL(StoreMap) UNIMPLEMENTED_NODE(StoreDoubleField) UNIMPLEMENTED_NODE(StoreSignedIntDataViewElement, type_) UNIMPLEMENTED_NODE(StoreDoubleDataViewElement) -UNIMPLEMENTED_NODE(StoreTaggedFieldNoWriteBarrier) UNIMPLEMENTED_NODE_WITH_CALL(StoreTaggedFieldWithWriteBarrier) -UNIMPLEMENTED_NODE(Switch) int BuiltinStringFromCharCode::MaxCallStackArgs() const { return AllocateDescriptor::GetStackParameterCount(); @@ -1187,6 +1183,127 @@ void CheckedFloat64Unbox::GenerateCode(MaglevAssembler* masm, __ bind(&done); } +int GeneratorStore::MaxCallStackArgs() const { + return WriteBarrierDescriptor::GetStackParameterCount(); +} +void 
GeneratorStore::SetValueLocationConstraints() { + UseAny(context_input()); + UseRegister(generator_input()); + for (int i = 0; i < num_parameters_and_registers(); i++) { + UseAny(parameters_and_registers(i)); + } + RequireSpecificTemporary(WriteBarrierDescriptor::ObjectRegister()); + RequireSpecificTemporary(WriteBarrierDescriptor::SlotAddressRegister()); +} +void GeneratorStore::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register generator = ToRegister(generator_input()); + Register array = WriteBarrierDescriptor::ObjectRegister(); + __ LoadTaggedPointerField( + array, FieldMemOperand(generator, + JSGeneratorObject::kParametersAndRegistersOffset)); + + for (int i = 0; i < num_parameters_and_registers(); i++) { + // Use WriteBarrierDescriptor::SlotAddressRegister() as the scratch + // register since it's a known temporary, and the write barrier slow path + // generates better code when value == scratch. + Register value = + __ FromAnyToRegister(parameters_and_registers(i), + WriteBarrierDescriptor::SlotAddressRegister()); + + ZoneLabelRef done(masm); + DeferredCodeInfo* deferred_write_barrier = __ PushDeferredCode( + [](MaglevAssembler* masm, ZoneLabelRef done, Register value, + Register array, GeneratorStore* node, int32_t offset) { + ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path"); + __ CheckPageFlag( + value, + MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, eq, + *done); + + Register slot_reg = WriteBarrierDescriptor::SlotAddressRegister(); + __ Add(slot_reg, array, offset - kHeapObjectTag); + + // TODO(leszeks): Add an interface for flushing all double registers + // before this Node, to avoid needing to save them here. + SaveFPRegsMode const save_fp_mode = + !node->register_snapshot().live_double_registers.is_empty() + ? 
SaveFPRegsMode::kSave + : SaveFPRegsMode::kIgnore; + + __ CallRecordWriteStub(array, slot_reg, save_fp_mode); + + __ B(*done); + }, + done, value, array, this, FixedArray::OffsetOfElementAt(i)); + + __ StoreTaggedField( + value, FieldMemOperand(array, FixedArray::OffsetOfElementAt(i))); + __ JumpIfSmi(value, *done); + // TODO(leszeks): This will stay either false or true throughout this loop. + // Consider hoisting the check out of the loop and duplicating the loop into + // with and without write barrier. + __ CheckPageFlag(array, MemoryChunk::kPointersFromHereAreInterestingMask, + ne, &deferred_write_barrier->deferred_code_label); + + __ bind(*done); + } + + // Use WriteBarrierDescriptor::SlotAddressRegister() as the scratch + // register, see comment above. + Register context = __ FromAnyToRegister( + context_input(), WriteBarrierDescriptor::SlotAddressRegister()); + + ZoneLabelRef done(masm); + DeferredCodeInfo* deferred_context_write_barrier = __ PushDeferredCode( + [](MaglevAssembler* masm, ZoneLabelRef done, Register context, + Register generator, GeneratorStore* node) { + ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path"); + // TODO(leszeks): The context is almost always going to be in + // old-space, consider moving this check to the fast path, maybe even + // as the first bailout. + __ CheckPageFlag(context, + MemoryChunk::kPointersFromHereAreInterestingMask, eq, + *done); + + __ Move(WriteBarrierDescriptor::ObjectRegister(), generator); + generator = WriteBarrierDescriptor::ObjectRegister(); + + Register slot_reg = WriteBarrierDescriptor::SlotAddressRegister(); + __ Add(slot_reg, generator, + JSGeneratorObject::kContextOffset - kHeapObjectTag); + + // TODO(leszeks): Add an interface for flushing all double registers + // before this Node, to avoid needing to save them here. + SaveFPRegsMode const save_fp_mode = + !node->register_snapshot().live_double_registers.is_empty() + ? 
SaveFPRegsMode::kSave + : SaveFPRegsMode::kIgnore; + + __ CallRecordWriteStub(generator, slot_reg, save_fp_mode); + + __ B(*done); + }, + done, context, generator, this); + __ StoreTaggedField( + context, FieldMemOperand(generator, JSGeneratorObject::kContextOffset)); + __ AssertNotSmi(context); + __ CheckPageFlag(generator, MemoryChunk::kPointersFromHereAreInterestingMask, + ne, &deferred_context_write_barrier->deferred_code_label); + __ bind(*done); + + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + __ Mov(scratch, Smi::FromInt(suspend_id())); + __ StoreTaggedField( + scratch, + FieldMemOperand(generator, JSGeneratorObject::kContinuationOffset)); + __ Mov(scratch, Smi::FromInt(bytecode_offset())); + __ StoreTaggedField( + scratch, + FieldMemOperand(generator, JSGeneratorObject::kInputOrDebugPosOffset)); +} + void IncreaseInterruptBudget::SetValueLocationConstraints() {} void IncreaseInterruptBudget::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { diff --git a/src/maglev/maglev-assembler.cc b/src/maglev/maglev-assembler.cc index e3bf347df2..641dd0d9d4 100644 --- a/src/maglev/maglev-assembler.cc +++ b/src/maglev/maglev-assembler.cc @@ -16,6 +16,23 @@ namespace v8 { namespace internal { namespace maglev { +Register MaglevAssembler::FromAnyToRegister(const Input& input, + Register scratch) { + if (input.operand().IsConstant()) { + input.node()->LoadToRegister(this, scratch); + return scratch; + } + const compiler::AllocatedOperand& operand = + compiler::AllocatedOperand::cast(input.operand()); + if (operand.IsRegister()) { + return ToRegister(input); + } else { + DCHECK(operand.IsStackSlot()); + Move(scratch, ToMemOperand(input)); + return scratch; + } +} + void MaglevAssembler::LoadSingleCharacterString(Register result, int char_code) { DCHECK_GE(char_code, 0); diff --git a/src/maglev/maglev-assembler.h b/src/maglev/maglev-assembler.h index cf977b8815..f087c38e7c 100644 --- a/src/maglev/maglev-assembler.h +++ 
b/src/maglev/maglev-assembler.h @@ -78,11 +78,13 @@ class MaglevAssembler : public MacroAssembler { void LoadSingleCharacterString(Register result, Register char_code, Register scratch); + inline void BindBlock(BasicBlock* block); + inline Condition IsRootConstant(Input input, RootIndex root_index); inline void Branch(Condition condition, BasicBlock* if_true, BasicBlock* if_false, BasicBlock* next_block); - inline Register FromAnyToRegister(const Input& input, Register scratch); + Register FromAnyToRegister(const Input& input, Register scratch); inline void LoadBoundedSizeFromObject(Register result, Register object, int offset); diff --git a/src/maglev/maglev-basic-block.h b/src/maglev/maglev-basic-block.h index 7f583e5bb2..19a919883c 100644 --- a/src/maglev/maglev-basic-block.h +++ b/src/maglev/maglev-basic-block.h @@ -73,6 +73,13 @@ class BasicBlock { edge_split_block_register_state_ = nullptr; } + bool is_start_block_of_switch_case() const { + return is_start_block_of_switch_case_; + } + void set_start_block_of_switch_case(bool value) { + is_start_block_of_switch_case_ = value; + } + Phi::List* phis() const { DCHECK(has_phi()); return state_->phis(); @@ -103,6 +110,7 @@ class BasicBlock { private: bool is_edge_split_block_ = false; + bool is_start_block_of_switch_case_ = false; Node::List nodes_; ControlNode* control_node_; union { diff --git a/src/maglev/maglev-code-generator.cc b/src/maglev/maglev-code-generator.cc index f7aacba557..6479c2d5a0 100644 --- a/src/maglev/maglev-code-generator.cc +++ b/src/maglev/maglev-code-generator.cc @@ -618,8 +618,7 @@ class MaglevCodeGeneratingNodeProcessor { ss << "-- Block b" << graph_labeller()->BlockId(block); __ RecordComment(ss.str()); } - - __ bind(block->label()); + __ BindBlock(block); } template diff --git a/src/maglev/maglev-ir.cc b/src/maglev/maglev-ir.cc index 6c5385dad7..4732661286 100644 --- a/src/maglev/maglev-ir.cc +++ b/src/maglev/maglev-ir.cc @@ -1392,6 +1392,37 @@ void 
StoreInArrayLiteralGeneric::GenerateCode(MaglevAssembler* masm, masm->DefineExceptionHandlerAndLazyDeoptPoint(this); } +void GeneratorRestoreRegister::SetValueLocationConstraints() { + UseRegister(array_input()); + DefineAsRegister(this); + // TODO(victorgomes): Create a arch-agnostic scratch register scope. + set_temporaries_needed(2); +} +void GeneratorRestoreRegister::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register array = ToRegister(array_input()); + Register result_reg = ToRegister(result()); + Register temp = general_temporaries().PopFirst(); + + // The input and the output can alias, if that happen we use a temporary + // register and a move at the end. + Register value = (array == result_reg ? temp : result_reg); + + // Loads the current value in the generator register file. + __ DecompressAnyTagged( + value, FieldMemOperand(array, FixedArray::OffsetOfElementAt(index()))); + + // And trashs it with StaleRegisterConstant. + Register scratch = general_temporaries().PopFirst(); + __ LoadRoot(scratch, RootIndex::kStaleRegister); + __ StoreTaggedField( + FieldMemOperand(array, FixedArray::OffsetOfElementAt(index())), scratch); + + if (value != result_reg) { + __ Move(result_reg, value); + } +} + int GetKeyedGeneric::MaxCallStackArgs() const { using D = CallInterfaceDescriptorFor::type; return D::GetStackParameterCount(); @@ -1416,6 +1447,19 @@ void GetKeyedGeneric::GenerateCode(MaglevAssembler* masm, masm->DefineExceptionHandlerAndLazyDeoptPoint(this); } +void StoreTaggedFieldNoWriteBarrier::SetValueLocationConstraints() { + UseRegister(object_input()); + UseRegister(value_input()); +} +void StoreTaggedFieldNoWriteBarrier::GenerateCode( + MaglevAssembler* masm, const ProcessingState& state) { + Register object = ToRegister(object_input()); + Register value = ToRegister(value_input()); + + __ AssertNotSmi(object); + __ StoreTaggedField(FieldMemOperand(object, offset()), value); +} + int StringAt::MaxCallStackArgs() const { 
DCHECK_EQ(Runtime::FunctionForId(Runtime::kStringCharCodeAt)->nargs, 2); return std::max(2, AllocateDescriptor::GetStackParameterCount()); @@ -2104,6 +2148,27 @@ void BranchIfUndefinedOrNull::GenerateCode(MaglevAssembler* masm, } } +void Switch::SetValueLocationConstraints() { + UseAndClobberRegister(value()); + // TODO(victorgomes): Create a arch-agnostic scratch register scope. + set_temporaries_needed(1); +} +void Switch::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { + Register scratch = general_temporaries().PopFirst(); + std::unique_ptr labels = std::make_unique(size()); + for (int i = 0; i < size(); i++) { + BasicBlock* block = (targets())[i].block_ptr(); + block->set_start_block_of_switch_case(true); + labels[i] = block->label(); + } + __ Switch(scratch, ToRegister(value()), value_base(), labels.get(), size()); + if (has_fallthrough()) { + DCHECK_EQ(fallthrough(), state.next_block()); + } else { + __ Trap(); + } +} + // --- // Print params // --- diff --git a/src/maglev/x64/maglev-assembler-x64-inl.h b/src/maglev/x64/maglev-assembler-x64-inl.h index 27cd4ebf32..1419cd1998 100644 --- a/src/maglev/x64/maglev-assembler-x64-inl.h +++ b/src/maglev/x64/maglev-assembler-x64-inl.h @@ -129,6 +129,10 @@ void MaglevAssembler::PushReverse(T... 
vals) { detail::PushAllHelper::PushReverse(this, vals...); } +inline void MaglevAssembler::BindBlock(BasicBlock* block) { + bind(block->label()); +} + inline Condition MaglevAssembler::IsRootConstant(Input input, RootIndex root_index) { if (input.operand().IsRegister()) { @@ -157,23 +161,6 @@ void MaglevAssembler::Branch(Condition condition, BasicBlock* if_true, } } -Register MaglevAssembler::FromAnyToRegister(const Input& input, - Register scratch) { - if (input.operand().IsConstant()) { - input.node()->LoadToRegister(this, scratch); - return scratch; - } - const compiler::AllocatedOperand& operand = - compiler::AllocatedOperand::cast(input.operand()); - if (operand.IsRegister()) { - return ToRegister(input); - } else { - DCHECK(operand.IsStackSlot()); - movq(scratch, ToMemOperand(input)); - return scratch; - } -} - inline MemOperand MaglevAssembler::GetStackSlot( const compiler::AllocatedOperand& operand) { return MemOperand(rbp, GetFramePointerOffsetForStackSlot(operand)); diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index 0f11cfaec2..354d87fa08 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -186,36 +186,6 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm, Smi::FromInt(bytecode_offset())); } -void GeneratorRestoreRegister::SetValueLocationConstraints() { - UseRegister(array_input()); - DefineAsRegister(this); - set_temporaries_needed(1); -} -void GeneratorRestoreRegister::GenerateCode(MaglevAssembler* masm, - const ProcessingState& state) { - Register array = ToRegister(array_input()); - Register result_reg = ToRegister(result()); - Register temp = general_temporaries().PopFirst(); - - // The input and the output can alias, if that happen we use a temporary - // register and a move at the end. - Register value = (array == result_reg ? temp : result_reg); - - // Loads the current value in the generator register file. 
- __ DecompressAnyTagged( - value, FieldOperand(array, FixedArray::OffsetOfElementAt(index()))); - - // And trashs it with StaleRegisterConstant. - __ LoadRoot(kScratchRegister, RootIndex::kStaleRegister); - __ StoreTaggedField( - FieldOperand(array, FixedArray::OffsetOfElementAt(index())), - kScratchRegister); - - if (value != result_reg) { - __ Move(result_reg, value); - } -} - int CreateEmptyObjectLiteral::MaxCallStackArgs() const { return AllocateDescriptor::GetStackParameterCount(); } @@ -1244,19 +1214,6 @@ void StoreDoubleField::GenerateCode(MaglevAssembler* masm, __ Movsd(FieldOperand(tmp, HeapNumber::kValueOffset), value); } -void StoreTaggedFieldNoWriteBarrier::SetValueLocationConstraints() { - UseRegister(object_input()); - UseRegister(value_input()); -} -void StoreTaggedFieldNoWriteBarrier::GenerateCode( - MaglevAssembler* masm, const ProcessingState& state) { - Register object = ToRegister(object_input()); - Register value = ToRegister(value_input()); - - __ AssertNotSmi(object); - __ StoreTaggedField(FieldOperand(object, offset()), value); -} - int StoreMap::MaxCallStackArgs() const { return WriteBarrierDescriptor::GetStackParameterCount(); } @@ -2783,21 +2740,6 @@ void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { __ Ret(); } -void Switch::SetValueLocationConstraints() { UseAndClobberRegister(value()); } -void Switch::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { - std::unique_ptr labels = std::make_unique(size()); - for (int i = 0; i < size(); i++) { - labels[i] = (targets())[i].block_ptr()->label(); - } - __ Switch(kScratchRegister, ToRegister(value()), value_base(), labels.get(), - size()); - if (has_fallthrough()) { - DCHECK_EQ(fallthrough(), state.next_block()); - } else { - __ Trap(); - } -} - namespace { void AttemptOnStackReplacement(MaglevAssembler* masm, From bbe24f16c645e9ef40c506b12b3353fa10123d89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olivier=20Fl=C3=BCckiger?= Date: Wed, 21 Dec 2022 
16:46:45 +0000 Subject: [PATCH 029/654] [static-roots] Clear string padding faster MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Clearing the exact amount of padding bytes apparently measurably regresses some string operations. For freshly allocated strings we can write into the payload area too, since that one is being written later. This allows us to clear a statically known amount of padding bytes which greatly speeds up the initialization. Bug: chromium:1402898 Bug: v8:13466 Change-Id: Ib5fd4877a88c88fbf5247ed0e2c4b2de1775623d Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4118772 Reviewed-by: Toon Verwaest Reviewed-by: Jakob Linke Commit-Queue: Olivier Flückiger Cr-Commit-Position: refs/heads/main@{#84980} --- src/heap/factory-base.cc | 6 +++--- src/objects/string-inl.h | 28 ++++++++++++---------------- src/objects/string.cc | 12 ++++++------ src/objects/string.h | 12 ++++++------ 4 files changed, 27 insertions(+), 31 deletions(-) diff --git a/src/heap/factory-base.cc b/src/heap/factory-base.cc index 4040c61fc1..3072a93285 100644 --- a/src/heap/factory-base.cc +++ b/src/heap/factory-base.cc @@ -674,10 +674,10 @@ MaybeHandle FactoryBase::NewRawStringWithMap( SeqStringT string = SeqStringT::cast(AllocateRawWithImmortalMap(size, allocation, map)); DisallowGarbageCollection no_gc; + string.clear_padding_destructively(length); string.set_length(length); string.set_raw_hash_field(String::kEmptyHashField); DCHECK_EQ(size, string.Size()); - string.clear_padding(); return handle(string, isolate()); } @@ -1055,10 +1055,10 @@ FactoryBase::AllocateRawOneByteInternalizedString( map); SeqOneByteString answer = SeqOneByteString::cast(result); DisallowGarbageCollection no_gc; + answer.clear_padding_destructively(length); answer.set_length(length); answer.set_raw_hash_field(raw_hash_field); DCHECK_EQ(size, answer.Size()); - answer.clear_padding(); return handle(answer, isolate()); } @@ -1077,10 +1077,10 @@ 
FactoryBase::AllocateRawTwoByteInternalizedString( map), map)); DisallowGarbageCollection no_gc; + answer.clear_padding_destructively(length); answer.set_length(length); answer.set_raw_hash_field(raw_hash_field); DCHECK_EQ(size, answer.Size()); - answer.clear_padding(); return handle(answer, isolate()); } diff --git a/src/objects/string-inl.h b/src/objects/string-inl.h index 4ce55c276a..87fd2c5505 100644 --- a/src/objects/string-inl.h +++ b/src/objects/string-inl.h @@ -1476,24 +1476,20 @@ SubStringRange::iterator SubStringRange::end() { return SubStringRange::iterator(string_, first_ + length_, no_gc_); } -void SeqOneByteString::clear_padding() { - const int data_size = SeqString::kHeaderSize + length() * kOneByteSize; - const int padding_size = SizeFor(length()) - data_size; - DCHECK_EQ((DataAndPaddingSizes{data_size, padding_size}), - GetDataAndPaddingSizes()); - DCHECK_EQ(address() + data_size + padding_size, address() + Size()); - if (padding_size == 0) return; - memset(reinterpret_cast(address() + data_size), 0, padding_size); +void SeqOneByteString::clear_padding_destructively(int length) { + // Ensure we are not killing the map word, which is already set at this point + static_assert(SizeFor(0) >= kObjectAlignment + kTaggedSize); + memset( + reinterpret_cast(address() + SizeFor(length) - kObjectAlignment), + 0, kObjectAlignment); } -void SeqTwoByteString::clear_padding() { - const int data_size = SeqString::kHeaderSize + length() * base::kUC16Size; - const int padding_size = SizeFor(length()) - data_size; - DCHECK_EQ((DataAndPaddingSizes{data_size, padding_size}), - GetDataAndPaddingSizes()); - DCHECK_EQ(address() + data_size + padding_size, address() + Size()); - if (padding_size == 0) return; - memset(reinterpret_cast(address() + data_size), 0, padding_size); +void SeqTwoByteString::clear_padding_destructively(int length) { + // Ensure we are not killing the map word, which is already set at this point + static_assert(SizeFor(0) >= kObjectAlignment + 
kTaggedSize); + memset( + reinterpret_cast(address() + SizeFor(length) - kObjectAlignment), + 0, kObjectAlignment); } // static diff --git a/src/objects/string.cc b/src/objects/string.cc index bf59d1ccdf..6877bce16b 100644 --- a/src/objects/string.cc +++ b/src/objects/string.cc @@ -1824,7 +1824,7 @@ Handle SeqString::Truncate(Isolate* isolate, Handle string, // We are storing the new length using release store after creating a filler // for the left-over space to avoid races with the sweeper thread. string->set_length(new_length, kReleaseStore); - string->clear_padding(); + string->ClearPadding(); return string; } @@ -1850,11 +1850,11 @@ SeqString::DataAndPaddingSizes SeqTwoByteString::GetDataAndPaddingSizes() return DataAndPaddingSizes{data_size, padding_size}; } -void SeqString::clear_padding() { - if (IsSeqOneByteString()) { - return SeqOneByteString::cast(*this).clear_padding(); - } - return SeqTwoByteString::cast(*this).clear_padding(); +void SeqString::ClearPadding() { + DataAndPaddingSizes sz = GetDataAndPaddingSizes(); + DCHECK_EQ(address() + sz.data_size + sz.padding_size, address() + Size()); + if (sz.padding_size == 0) return; + memset(reinterpret_cast(address() + sz.data_size), 0, sz.padding_size); } uint16_t ConsString::Get( diff --git a/src/objects/string.h b/src/objects/string.h index 1b3ea5a0a3..bf1730314e 100644 --- a/src/objects/string.h +++ b/src/objects/string.h @@ -714,8 +714,8 @@ class SeqString : public TorqueGeneratedSeqString { }; DataAndPaddingSizes GetDataAndPaddingSizes() const; - // Zero out the padding bytes of this string. - void clear_padding(); + // Zero out only the padding bytes of this string. + void ClearPadding(); TQ_OBJECT_CONSTRUCTORS(SeqString) }; @@ -761,8 +761,8 @@ class SeqOneByteString DataAndPaddingSizes GetDataAndPaddingSizes() const; - // Zero out the padding bytes of this string. - inline void clear_padding(); + // Initializes padding bytes. Potentially zeros tail of the payload too! 
+ inline void clear_padding_destructively(int length); // Maximal memory usage for a single sequential one-byte string. static const int kMaxCharsSize = kMaxLength; @@ -808,8 +808,8 @@ class SeqTwoByteString DataAndPaddingSizes GetDataAndPaddingSizes() const; - // Zero out the padding bytes of this string. - inline void clear_padding(); + // Initializes padding bytes. Potentially zeros tail of the payload too! + inline void clear_padding_destructively(int length); // Maximal memory usage for a single sequential two-byte string. static const int kMaxCharsSize = kMaxLength * 2; From fd81728f2e677a0fa59db24138ce779ce655eb8e Mon Sep 17 00:00:00 2001 From: Andrew Grieve Date: Tue, 20 Dec 2022 22:12:29 -0500 Subject: [PATCH 030/654] Depend direclty on run_mksnapshot_default rather v8 for v8_external_startup_data_assets This will prevent building v8 arm code when all we want is the snapshot file. Bug: chromium:1402705 Change-Id: If8137e8ef79ff1943c24d5bd71d8374e59bdf561 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4117695 Commit-Queue: Adam Klein Auto-Submit: Andrew Grieve Reviewed-by: Adam Klein Cr-Commit-Position: refs/heads/main@{#84981} --- BUILD.gn | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/BUILD.gn b/BUILD.gn index 87d0428475..682dfddc4a 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -1602,7 +1602,7 @@ template("asm_to_inline_asm") { if (is_android && enable_java_templates) { android_assets("v8_external_startup_data_assets") { if (v8_use_external_startup_data) { - deps = [ "//v8" ] + deps = [ ":run_mksnapshot_default" ] renaming_sources = [ "$root_out_dir/snapshot_blob.bin" ] if (current_cpu == "arm" || current_cpu == "x86") { renaming_destinations = [ "snapshot_blob_32.bin" ] From 477116fb5a111ba0cc71be66a86a3c29f648e906 Mon Sep 17 00:00:00 2001 From: Adam Klein Date: Wed, 21 Dec 2022 10:25:42 -0800 Subject: [PATCH 031/654] Skip mjsunit/md5 under --future due to maglev It's failing regularly on the ubsan bot, closing the tree. 
This CL also adds a 'variant == future' section in the mjsunit.status file to enable easy disabling of maglev-specific failures. No-Tree-Checks: true Bug: v8:13612 Change-Id: I3c0f7725e1fb36577e97c662cd3830c64ba298e0 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4120554 Bot-Commit: Rubber Stamper Commit-Queue: Rubber Stamper Auto-Submit: Adam Klein Cr-Commit-Position: refs/heads/main@{#84982} --- test/mjsunit/mjsunit.status | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status index b69d19df1b..9f3dba4754 100644 --- a/test/mjsunit/mjsunit.status +++ b/test/mjsunit/mjsunit.status @@ -1507,6 +1507,16 @@ 'never-optimize': [SKIP], }], # variant == maglev +############################################################################## +['variant == future', { + # Because future implies maglev, tests that fail under maglev now also fail + # under future. Thus this section likely supercedes the maglev section above. + # TODO(adamk): Remove the maglev section in preference to this one? 
+ + # https://crbug.com/v8/13612 + 'md5': [SKIP], +}], # variant == future + ############################################################################## ['no_simd_hardware == True', { 'wasm/exceptions-simd': [SKIP], From dcba0f020144c22948b337c8b0bf1926a6829eae Mon Sep 17 00:00:00 2001 From: Milad Fa Date: Wed, 21 Dec 2022 12:29:41 -0500 Subject: [PATCH 032/654] PPC[liftoff]: Implement FP promote and demote Change-Id: I9e2c79d9b1b679c2780135fe02c14f526bdf93f0 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4120355 Reviewed-by: Vasili Skurydzin Commit-Queue: Milad Farazmand Cr-Commit-Position: refs/heads/main@{#84983} --- src/codegen/ppc/macro-assembler-ppc.cc | 19 ++++++++++++++ src/codegen/ppc/macro-assembler-ppc.h | 2 ++ .../backend/ppc/code-generator-ppc.cc | 22 ++-------------- src/wasm/baseline/ppc/liftoff-assembler-ppc.h | 26 +++++++------------ 4 files changed, 32 insertions(+), 37 deletions(-) diff --git a/src/codegen/ppc/macro-assembler-ppc.cc b/src/codegen/ppc/macro-assembler-ppc.cc index fdbcb2788b..15441e7db4 100644 --- a/src/codegen/ppc/macro-assembler-ppc.cc +++ b/src/codegen/ppc/macro-assembler-ppc.cc @@ -4573,6 +4573,25 @@ void TurboAssembler::I16x8ExtAddPairwiseI8x16U(Simd128Register dst, } #undef EXT_ADD_PAIRWISE +void TurboAssembler::F64x2PromoteLowF32x4(Simd128Register dst, + Simd128Register src) { + constexpr int lane_number = 8; + vextractd(dst, src, Operand(lane_number)); + vinsertw(dst, dst, Operand(lane_number)); + xvcvspdp(dst, dst); +} + +void TurboAssembler::F32x4DemoteF64x2Zero(Simd128Register dst, + Simd128Register src, + Simd128Register scratch) { + constexpr int lane_number = 8; + xvcvdpsp(scratch, src); + vextractuw(dst, scratch, Operand(lane_number)); + vinsertw(scratch, dst, Operand(4)); + vxor(dst, dst, dst); + vinsertd(dst, scratch, Operand(lane_number)); +} + void TurboAssembler::V128AnyTrue(Register dst, Simd128Register src, Register scratch1, Register scratch2, Simd128Register scratch3) { diff --git 
a/src/codegen/ppc/macro-assembler-ppc.h b/src/codegen/ppc/macro-assembler-ppc.h index 67d0565f0b..14fff32563 100644 --- a/src/codegen/ppc/macro-assembler-ppc.h +++ b/src/codegen/ppc/macro-assembler-ppc.h @@ -1238,6 +1238,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { V(F64x2Ceil) \ V(F64x2Floor) \ V(F64x2Trunc) \ + V(F64x2PromoteLowF32x4) \ V(F32x4Abs) \ V(F32x4Neg) \ V(F32x4Sqrt) \ @@ -1266,6 +1267,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { #undef SIMD_UNOP_LIST #define SIMD_UNOP_WITH_SCRATCH_LIST(V) \ + V(F32x4DemoteF64x2Zero) \ V(I64x2Abs) \ V(I32x4Abs) \ V(I32x4SConvertF32x4) \ diff --git a/src/compiler/backend/ppc/code-generator-ppc.cc b/src/compiler/backend/ppc/code-generator-ppc.cc index 471ccc00f5..838a823a96 100644 --- a/src/compiler/backend/ppc/code-generator-ppc.cc +++ b/src/compiler/backend/ppc/code-generator-ppc.cc @@ -2356,6 +2356,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( V(F64x2Ceil) \ V(F64x2Floor) \ V(F64x2Trunc) \ + V(F64x2PromoteLowF32x4) \ V(F32x4Abs) \ V(F32x4Neg) \ V(F32x4SConvertI32x4) \ @@ -2387,6 +2388,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( #undef SIMD_UNOP_LIST #define SIMD_UNOP_WITH_SCRATCH_LIST(V) \ + V(F32x4DemoteF64x2Zero) \ V(I64x2Abs) \ V(I32x4Abs) \ V(I32x4SConvertF32x4) \ @@ -2893,26 +2895,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } #undef MAYBE_REVERSE_BYTES - case kPPC_F64x2PromoteLowF32x4: { - constexpr int lane_number = 8; - Simd128Register src = i.InputSimd128Register(0); - Simd128Register dst = i.OutputSimd128Register(); - __ vextractd(kScratchSimd128Reg, src, Operand(lane_number)); - __ vinsertw(kScratchSimd128Reg, kScratchSimd128Reg, Operand(lane_number)); - __ xvcvspdp(dst, kScratchSimd128Reg); - break; - } - case kPPC_F32x4DemoteF64x2Zero: { - constexpr int lane_number = 8; - Simd128Register src = i.InputSimd128Register(0); - Simd128Register dst = 
i.OutputSimd128Register(); - __ xvcvdpsp(kScratchSimd128Reg, src); - __ vextractuw(dst, kScratchSimd128Reg, Operand(lane_number)); - __ vinsertw(kScratchSimd128Reg, dst, Operand(4)); - __ vxor(dst, dst, dst); - __ vinsertd(dst, kScratchSimd128Reg, Operand(lane_number)); - break; - } case kPPC_I32x4TruncSatF64x2SZero: { constexpr int lane_number = 8; Simd128Register src = i.InputSimd128Register(0); diff --git a/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/src/wasm/baseline/ppc/liftoff-assembler-ppc.h index b6511dbfc0..8a16ac0142 100644 --- a/src/wasm/baseline/ppc/liftoff-assembler-ppc.h +++ b/src/wasm/baseline/ppc/liftoff-assembler-ppc.h @@ -1975,6 +1975,7 @@ SIMD_SHIFT_RI_LIST(EMIT_SIMD_SHIFT_RI) V(f64x2_ceil, F64x2Ceil, true, bool) \ V(f64x2_floor, F64x2Floor, true, bool) \ V(f64x2_trunc, F64x2Trunc, true, bool) \ + V(f64x2_promote_low_f32x4, F64x2PromoteLowF32x4, , void) \ V(f32x4_abs, F32x4Abs, , void) \ V(f32x4_neg, F32x4Neg, , void) \ V(f32x4_sqrt, F32x4Sqrt, , void) \ @@ -2006,13 +2007,14 @@ SIMD_UNOP_LIST(EMIT_SIMD_UNOP) #undef EMIT_SIMD_UNOP #undef SIMD_UNOP_LIST -#define SIMD_UNOP_WITH_SCRATCH_LIST(V) \ - V(i64x2_abs, I64x2Abs, , void) \ - V(i32x4_abs, I32x4Abs, , void) \ - V(i32x4_sconvert_f32x4, I32x4SConvertF32x4, , void) \ - V(i16x8_abs, I16x8Abs, , void) \ - V(i16x8_neg, I16x8Neg, , void) \ - V(i8x16_abs, I8x16Abs, , void) \ +#define SIMD_UNOP_WITH_SCRATCH_LIST(V) \ + V(f32x4_demote_f64x2_zero, F32x4DemoteF64x2Zero, , void) \ + V(i64x2_abs, I64x2Abs, , void) \ + V(i32x4_abs, I32x4Abs, , void) \ + V(i32x4_sconvert_f32x4, I32x4SConvertF32x4, , void) \ + V(i16x8_abs, I16x8Abs, , void) \ + V(i16x8_neg, I16x8Neg, , void) \ + V(i8x16_abs, I8x16Abs, , void) \ V(i8x16_neg, I8x16Neg, , void) #define EMIT_SIMD_UNOP_WITH_SCRATCH(name, op, return_val, return_type) \ @@ -2318,11 +2320,6 @@ void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst, kScratchSimd128Reg); } -void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst, - 
LiftoffRegister src) { - bailout(kSimd, "f64x2.promote_low_f32x4"); -} - void LiftoffAssembler::emit_f32x4_relaxed_min(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { @@ -2437,11 +2434,6 @@ void LiftoffAssembler::emit_s128_select(LiftoffRegister dst, mask.fp().toSimd()); } -void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "f32x4.demote_f64x2_zero"); -} - void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst, LiftoffRegister src) { I16x8UConvertI8x16Low(dst.fp().toSimd(), src.fp().toSimd(), r0, From 90eeb55fc7c297f7de263fb0f0e72d97a26cb52a Mon Sep 17 00:00:00 2001 From: Kunihiko Sakamoto Date: Tue, 20 Dec 2022 13:48:29 +0900 Subject: [PATCH 033/654] Clear compilation cache in Isolate::ClearCachesForTesting() This prevents blink leak detection from complaining when there is a reference from SharedFunctionInfo to a blink object that is a target of leak detection. Bug: chromium:1393246 Change-Id: I9381f5e27e90c77e4ed721fcc3d257b5f6edd212 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4114056 Commit-Queue: Kunihiko Sakamoto Reviewed-by: Leszek Swirski Cr-Commit-Position: refs/heads/main@{#84984} --- src/api/api.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/api/api.cc b/src/api/api.cc index b425d61ce3..bfba6d66c7 100644 --- a/src/api/api.cc +++ b/src/api/api.cc @@ -38,6 +38,7 @@ #include "src/baseline/baseline-batch-compiler.h" #include "src/builtins/accessors.h" #include "src/builtins/builtins-utils.h" +#include "src/codegen/compilation-cache.h" #include "src/codegen/compiler.h" #include "src/codegen/cpu-features.h" #include "src/codegen/script-details.h" @@ -9651,6 +9652,7 @@ void Isolate::ClearCachesForTesting() { i::Isolate* i_isolate = reinterpret_cast(this); i_isolate->AbortConcurrentOptimization(i::BlockingBehavior::kBlock); i_isolate->ClearSerializerData(); + i_isolate->compilation_cache()->Clear(); } void 
Isolate::EnableMemorySavingsMode() { From c3302c902a6d60f4291314b5d806ca97c1736bc7 Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Wed, 21 Dec 2022 19:27:43 -0800 Subject: [PATCH 034/654] Update V8 DEPS (trusted) Rolling v8/build: https://chromium.googlesource.com/chromium/src/build/+log/8b3f134..92221be Rolling v8/buildtools: https://chromium.googlesource.com/chromium/src/buildtools/+log/600a615..134af4c Rolling v8/buildtools/third_party/libc++/trunk: https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxx/+log/7b20455..7c5e4b4 Rolling v8/buildtools/third_party/libc++abi/trunk: https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxxabi/+log/123239c..df3cc8e Rolling v8/third_party/catapult: https://chromium.googlesource.com/catapult/+log/1e43416..1651224 Rolling v8/third_party/depot_tools: https://chromium.googlesource.com/chromium/tools/depot_tools/+log/5decb17..cf31045 Rolling v8/third_party/fuchsia-sdk/sdk: version:11.20221219.3.1..version:11.20221221.3.1 Rolling v8/tools/clang: https://chromium.googlesource.com/chromium/src/tools/clang/+log/cab032b..3b54a13 Change-Id: Iac0610e49ec38efc0cebb8c1d09ba07941596561 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4121196 Commit-Queue: v8-ci-autoroll-builder Bot-Commit: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#84985} --- DEPS | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/DEPS b/DEPS index 31a0702c19..8bf011c9db 100644 --- a/DEPS +++ b/DEPS @@ -65,7 +65,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. 
- 'fuchsia_version': 'version:11.20221219.3.1', + 'fuchsia_version': 'version:11.20221221.3.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -105,9 +105,9 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '8b3f1346a4f7f3b89c938e537a9be0e2120a9535', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '92221beb1ec71e7f6847cf4da4496ad8bb23337c', 'buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '600a61514a682cdda7952a3ef8c75acd9487fa6b', + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '134af4c91bb9ab46fe1165ff1cf0f76900fa5a7e', 'buildtools/clang_format/script': Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '8b525d2747f2584fc35d8c7e612e66f377858df7', 'buildtools/linux64': { @@ -131,9 +131,9 @@ deps = { 'condition': 'host_os == "mac"', }, 'buildtools/third_party/libc++/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '7b20455cbdf0891a6e5e2b66609b08c4f407ae5f', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '7c5e4b4eb3c5970f5525f62650c5b76f56dd99a8', 'buildtools/third_party/libc++abi/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '123239cdb67b3d69c5af933e364a84019a33575c', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'df3cc8ecee393c765a7274a4687f8dff3558d590', 'buildtools/third_party/libunwind/trunk': Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '5e22a7fe2335161ab267867c8e1be481bf6c8300', 'buildtools/win': { @@ -209,7 +209,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/catapult': { - 'url': Var('chromium_url') + 
'/catapult.git' + '@' + '1e4341629217ba4a71a976d9c173d13f7c4e63a4', + 'url': Var('chromium_url') + '/catapult.git' + '@' + '1651224cd6bc419f6d1a12ee09074daa9f3ebbdc', 'condition': 'checkout_android', }, 'third_party/colorama/src': { @@ -217,7 +217,7 @@ deps = { 'condition': 'checkout_android', }, 'third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '5decb175432cb284b6f8ee102dc1b908b58d8e41', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'cf31045b347e24e6619f2564fdb0c2490f661745', 'third_party/fuchsia-sdk/sdk': { 'packages': [ { @@ -272,7 +272,7 @@ deps = { 'third_party/zlib': Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '18d27fa10b237fdfcbd8f0c65c19fe009981a3bc', 'tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'cab032b11ddc12804bf4ae8d71a6e0f88bc51ddb', + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '3b54a131a94f1345579c9d92b08c2b45c43cfe77', 'tools/luci-go': { 'packages': [ { From ac65192ce2f2ecd244f6cc592da3c2bba4916948 Mon Sep 17 00:00:00 2001 From: Liu Yu Date: Thu, 22 Dec 2022 11:41:40 +0800 Subject: [PATCH 035/654] [loong64][mips64][centry] Remove the unused SaveFPRegsMode parameter Port 605e46479aca3449a6ba1350a1de7927c76b86ad Bug: v8:13606 Change-Id: I8dc8ba9c8ec57d01e290f1817d3fa1f8aa17263a Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4117943 Auto-Submit: Liu Yu Commit-Queue: Zhao Jiazhong Reviewed-by: Zhao Jiazhong Cr-Commit-Position: refs/heads/main@{#84986} --- src/builtins/loong64/builtins-loong64.cc | 17 +++----- src/builtins/mips64/builtins-mips64.cc | 17 +++----- .../loong64/macro-assembler-loong64.cc | 39 ++++--------------- src/codegen/loong64/macro-assembler-loong64.h | 20 ++++------ src/codegen/mips64/macro-assembler-mips64.cc | 39 ++++--------------- src/codegen/mips64/macro-assembler-mips64.h | 20 ++++------ 6 files changed, 40 insertions(+), 112 deletions(-) diff --git 
a/src/builtins/loong64/builtins-loong64.cc b/src/builtins/loong64/builtins-loong64.cc index 74a88dbb1b..dbd9226f52 100644 --- a/src/builtins/loong64/builtins-loong64.cc +++ b/src/builtins/loong64/builtins-loong64.cc @@ -2754,8 +2754,7 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, - SaveFPRegsMode save_doubles, ArgvMode argv_mode, - bool builtin_exit_frame) { + ArgvMode argv_mode, bool builtin_exit_frame) { // Called from JavaScript; parameters are on stack as if calling JS function // a0: number of arguments including receiver // a1: pointer to builtin function @@ -2778,8 +2777,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // Enter the exit frame that transitions from JavaScript to C++. FrameScope scope(masm, StackFrame::MANUAL); __ EnterExitFrame( - save_doubles == SaveFPRegsMode::kSave, 0, - builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); + 0, builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); // s0: number of arguments including receiver (C callee-saved) // s1: pointer to first argument (C callee-saved) @@ -2832,7 +2830,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, ? no_reg // s0: still holds argc (callee-saved). : s0; - __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN); + __ LeaveExitFrame(argc, EMIT_RETURN); // Handling of exception. __ bind(&exception_returned); @@ -3099,10 +3097,8 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, __ Ld_d(s0, *stack_space_operand); } - static constexpr bool kDontSaveDoubles = false; static constexpr bool kRegisterContainsSlotCount = false; - __ LeaveExitFrame(kDontSaveDoubles, s0, NO_EMIT_RETURN, - kRegisterContainsSlotCount); + __ LeaveExitFrame(s0, NO_EMIT_RETURN, kRegisterContainsSlotCount); // Check if the function scheduled an exception. 
__ LoadRoot(a4, RootIndex::kTheHoleValue); @@ -3211,9 +3207,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // Allocate the v8::Arguments structure in the arguments' space since // it's not controlled by GC. static constexpr int kApiStackSpace = 4; - static constexpr bool kDontSaveDoubles = false; FrameScope frame_scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace); + __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT); // EnterExitFrame may align the sp. @@ -3311,7 +3306,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { const int kApiStackSpace = 1; FrameScope frame_scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(false, kApiStackSpace); + __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT); // Create v8::PropertyCallbackInfo object on the stack and initialize // it's args_ field. diff --git a/src/builtins/mips64/builtins-mips64.cc b/src/builtins/mips64/builtins-mips64.cc index 741744d9f1..1004c32cb0 100644 --- a/src/builtins/mips64/builtins-mips64.cc +++ b/src/builtins/mips64/builtins-mips64.cc @@ -2774,8 +2774,7 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, - SaveFPRegsMode save_doubles, ArgvMode argv_mode, - bool builtin_exit_frame) { + ArgvMode argv_mode, bool builtin_exit_frame) { // Called from JavaScript; parameters are on stack as if calling JS function // a0: number of arguments including receiver // a1: pointer to builtin function @@ -2798,8 +2797,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // Enter the exit frame that transitions from JavaScript to C++. FrameScope scope(masm, StackFrame::MANUAL); __ EnterExitFrame( - save_doubles == SaveFPRegsMode::kSave, 0, - builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); + 0, builtin_exit_frame ? 
StackFrame::BUILTIN_EXIT : StackFrame::EXIT); // s0: number of arguments including receiver (C callee-saved) // s1: pointer to first argument (C callee-saved) @@ -2852,7 +2850,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, ? no_reg // s0: still holds argc (callee-saved). : s0; - __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN); + __ LeaveExitFrame(argc, EMIT_RETURN); // Handling of exception. __ bind(&exception_returned); @@ -3118,10 +3116,8 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, __ Ld(s0, *stack_space_operand); } - static constexpr bool kDontSaveDoubles = false; static constexpr bool kRegisterContainsSlotCount = false; - __ LeaveExitFrame(kDontSaveDoubles, s0, NO_EMIT_RETURN, - kRegisterContainsSlotCount); + __ LeaveExitFrame(s0, NO_EMIT_RETURN, kRegisterContainsSlotCount); // Check if the function scheduled an exception. __ LoadRoot(a4, RootIndex::kTheHoleValue); @@ -3231,9 +3227,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // Allocate the v8::Arguments structure in the arguments' space since // it's not controlled by GC. static constexpr int kApiStackSpace = 4; - static constexpr bool kDontSaveDoubles = false; FrameScope frame_scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace); + __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT); // EnterExitFrame may align the sp. @@ -3330,7 +3325,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { const int kApiStackSpace = 1; FrameScope frame_scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(false, kApiStackSpace); + __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT); // Create v8::PropertyCallbackInfo object on the stack and initialize // it's args_ field. 
diff --git a/src/codegen/loong64/macro-assembler-loong64.cc b/src/codegen/loong64/macro-assembler-loong64.cc index 8dfe2d5fc5..26df7fb6cd 100644 --- a/src/codegen/loong64/macro-assembler-loong64.cc +++ b/src/codegen/loong64/macro-assembler-loong64.cc @@ -3359,8 +3359,8 @@ void TurboAssembler::MulOverflow_d(Register dst, Register left, xor_(overflow, overflow, scratch2); } -void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles) { +void MacroAssembler::CallRuntime(const Runtime::Function* f, + int num_arguments) { ASM_CODE_COMMENT(this); // All parameters are on the stack. v0 has the return value after call. @@ -3375,8 +3375,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, // smarter. PrepareCEntryArgs(num_arguments); PrepareCEntryFunction(ExternalReference::Create(f)); - Handle code = - CodeFactory::CEntry(isolate(), f->result_size, save_doubles); + Handle code = CodeFactory::CEntry(isolate(), f->result_size); Call(code, RelocInfo::CODE_TARGET); } @@ -3393,8 +3392,8 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) { void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin, bool builtin_exit_frame) { PrepareCEntryFunction(builtin); - Handle code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore, - ArgvMode::kStack, builtin_exit_frame); + Handle code = + CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame); Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg)); } @@ -3556,7 +3555,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) { Ld_d(fp, MemOperand(fp, 0 * kPointerSize)); } -void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, +void MacroAssembler::EnterExitFrame(int stack_space, StackFrame::Type frame_type) { ASM_CODE_COMMENT(this); DCHECK(frame_type == StackFrame::EXIT || @@ -3605,17 +3604,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, } 
const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); - if (save_doubles) { - // The stack is already aligned to 0 modulo 8 for stores with sdc1. - int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2; - int space = kNumOfSavedRegisters * kDoubleSize; - Sub_d(sp, sp, Operand(space)); - // Remember: we only need to save every 2nd double FPU value. - for (int i = 0; i < kNumOfSavedRegisters; i++) { - FPURegister reg = FPURegister::from_code(2 * i); - Fst_d(reg, MemOperand(sp, i * kDoubleSize)); - } - } // Reserve place for the return address, stack space and an optional slot // (used by DirectCEntry to hold the return value if a struct is @@ -3635,23 +3623,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, St_d(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); } -void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, - bool do_return, +void MacroAssembler::LeaveExitFrame(Register argument_count, bool do_return, bool argument_count_is_length) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); - // Optionally restore all double registers. - if (save_doubles) { - // Remember: we only need to restore every 2nd double FPU value. - int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2; - Sub_d(t8, fp, - Operand(ExitFrameConstants::kFixedFrameSizeFromFp + - kNumOfSavedRegisters * kDoubleSize)); - for (int i = 0; i < kNumOfSavedRegisters; i++) { - FPURegister reg = FPURegister::from_code(2 * i); - Fld_d(reg, MemOperand(t8, i * kDoubleSize)); - } - } // Clear top frame. li(t8, diff --git a/src/codegen/loong64/macro-assembler-loong64.h b/src/codegen/loong64/macro-assembler-loong64.h index 57395c903a..d736d04b72 100644 --- a/src/codegen/loong64/macro-assembler-loong64.h +++ b/src/codegen/loong64/macro-assembler-loong64.h @@ -899,14 +899,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // Enter exit frame. 
// argc - argument count to be dropped by LeaveExitFrame. - // save_doubles - saves FPU registers on stack, currently disabled. // stack_space - extra stack space. - void EnterExitFrame(bool save_doubles, int stack_space = 0, - StackFrame::Type frame_type = StackFrame::EXIT); + void EnterExitFrame(int stack_space, StackFrame::Type frame_type); // Leave the current exit frame. - void LeaveExitFrame(bool save_doubles, Register arg_count, - bool do_return = NO_EMIT_RETURN, + void LeaveExitFrame(Register arg_count, bool do_return = NO_EMIT_RETURN, bool argument_count_is_length = false); // Make sure the stack is aligned. Only emits code in debug mode. @@ -966,20 +963,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // Runtime calls. // Call a runtime routine. - void CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore); + void CallRuntime(const Runtime::Function* f, int num_arguments); // Convenience function: Same as above, but takes the fid instead. - void CallRuntime(Runtime::FunctionId fid, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { + void CallRuntime(Runtime::FunctionId fid) { const Runtime::Function* function = Runtime::FunctionForId(fid); - CallRuntime(function, function->nargs, save_doubles); + CallRuntime(function, function->nargs); } // Convenience function: Same as above, but takes the fid instead. - void CallRuntime(Runtime::FunctionId fid, int num_arguments, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { - CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles); + void CallRuntime(Runtime::FunctionId fid, int num_arguments) { + CallRuntime(Runtime::FunctionForId(fid), num_arguments); } // Convenience function: tail call a runtime routine (jump). 
diff --git a/src/codegen/mips64/macro-assembler-mips64.cc b/src/codegen/mips64/macro-assembler-mips64.cc index 051d758515..31321bf909 100644 --- a/src/codegen/mips64/macro-assembler-mips64.cc +++ b/src/codegen/mips64/macro-assembler-mips64.cc @@ -5267,8 +5267,8 @@ void TurboAssembler::DMulOverflow(Register dst, Register left, xor_(overflow, overflow, scratch); } -void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles) { +void MacroAssembler::CallRuntime(const Runtime::Function* f, + int num_arguments) { ASM_CODE_COMMENT(this); // All parameters are on the stack. v0 has the return value after call. @@ -5283,8 +5283,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, // smarter. PrepareCEntryArgs(num_arguments); PrepareCEntryFunction(ExternalReference::Create(f)); - Handle code = - CodeFactory::CEntry(isolate(), f->result_size, save_doubles); + Handle code = CodeFactory::CEntry(isolate(), f->result_size); Call(code, RelocInfo::CODE_TARGET); } @@ -5302,8 +5301,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin, BranchDelaySlot bd, bool builtin_exit_frame) { PrepareCEntryFunction(builtin); - Handle code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore, - ArgvMode::kStack, builtin_exit_frame); + Handle code = + CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame); Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), bd); } @@ -5465,7 +5464,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) { Ld(fp, MemOperand(fp, 0 * kPointerSize)); } -void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, +void MacroAssembler::EnterExitFrame(int stack_space, StackFrame::Type frame_type) { ASM_CODE_COMMENT(this); DCHECK(frame_type == StackFrame::EXIT || @@ -5514,17 +5513,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, } const int frame_alignment = 
MacroAssembler::ActivationFrameAlignment(); - if (save_doubles) { - // The stack is already aligned to 0 modulo 8 for stores with sdc1. - int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2; - int space = kNumOfSavedRegisters * kDoubleSize; - Dsubu(sp, sp, Operand(space)); - // Remember: we only need to save every 2nd double FPU value. - for (int i = 0; i < kNumOfSavedRegisters; i++) { - FPURegister reg = FPURegister::from_code(2 * i); - Sdc1(reg, MemOperand(sp, i * kDoubleSize)); - } - } // Reserve place for the return address, stack space and an optional slot // (used by DirectCEntry to hold the return value if a struct is @@ -5544,23 +5532,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, Sd(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); } -void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, - bool do_return, +void MacroAssembler::LeaveExitFrame(Register argument_count, bool do_return, bool argument_count_is_length) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); - // Optionally restore all double registers. - if (save_doubles) { - // Remember: we only need to restore every 2nd double FPU value. - int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2; - Dsubu(t8, fp, - Operand(ExitFrameConstants::kFixedFrameSizeFromFp + - kNumOfSavedRegisters * kDoubleSize)); - for (int i = 0; i < kNumOfSavedRegisters; i++) { - FPURegister reg = FPURegister::from_code(2 * i); - Ldc1(reg, MemOperand(t8, i * kDoubleSize)); - } - } // Clear top frame. li(t8, diff --git a/src/codegen/mips64/macro-assembler-mips64.h b/src/codegen/mips64/macro-assembler-mips64.h index 77897e1efd..4bd71b8e82 100644 --- a/src/codegen/mips64/macro-assembler-mips64.h +++ b/src/codegen/mips64/macro-assembler-mips64.h @@ -1085,14 +1085,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // Enter exit frame. // argc - argument count to be dropped by LeaveExitFrame. 
- // save_doubles - saves FPU registers on stack, currently disabled. // stack_space - extra stack space. - void EnterExitFrame(bool save_doubles, int stack_space = 0, - StackFrame::Type frame_type = StackFrame::EXIT); + void EnterExitFrame(int stack_space, StackFrame::Type frame_type); // Leave the current exit frame. - void LeaveExitFrame(bool save_doubles, Register arg_count, - bool do_return = NO_EMIT_RETURN, + void LeaveExitFrame(Register arg_count, bool do_return = NO_EMIT_RETURN, bool argument_count_is_length = false); // Make sure the stack is aligned. Only emits code in debug mode. @@ -1152,20 +1149,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // Runtime calls. // Call a runtime routine. - void CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore); + void CallRuntime(const Runtime::Function* f, int num_arguments); // Convenience function: Same as above, but takes the fid instead. - void CallRuntime(Runtime::FunctionId fid, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { + void CallRuntime(Runtime::FunctionId fid) { const Runtime::Function* function = Runtime::FunctionForId(fid); - CallRuntime(function, function->nargs, save_doubles); + CallRuntime(function, function->nargs); } // Convenience function: Same as above, but takes the fid instead. - void CallRuntime(Runtime::FunctionId fid, int num_arguments, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { - CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles); + void CallRuntime(Runtime::FunctionId fid, int num_arguments) { + CallRuntime(Runtime::FunctionForId(fid), num_arguments); } // Convenience function: tail call a runtime routine (jump). 
From 2c36e2213f5417ef2c08f9ce83871458c6d94b84 Mon Sep 17 00:00:00 2001 From: Matthias Liedtke Date: Wed, 21 Dec 2022 15:59:33 +0100 Subject: [PATCH 036/654] [wasm.gc] WebAssembly.Table: Allow 'i31ref' type specifier Bug: v8:7748 Change-Id: Iec34e16219a76e83cfadf7724fda5a6cfa80f69c Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4115748 Reviewed-by: Manos Koukoutos Auto-Submit: Matthias Liedtke Commit-Queue: Manos Koukoutos Cr-Commit-Position: refs/heads/main@{#84987} --- src/wasm/wasm-js.cc | 4 ++- .../debugger/wasm-gc-anyref-expected.txt | 15 ++++---- test/inspector/debugger/wasm-gc-anyref.js | 19 ++++++++-- .../wasm/reference-table-js-interop.js | 27 +++++++++++--- test/mjsunit/wasm/reference-tables.js | 35 +++++++++++++++++++ 5 files changed, 85 insertions(+), 15 deletions(-) diff --git a/src/wasm/wasm-js.cc b/src/wasm/wasm-js.cc index 263ea125a3..3d9f621535 100644 --- a/src/wasm/wasm-js.cc +++ b/src/wasm/wasm-js.cc @@ -1179,8 +1179,10 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo& args) { } else if (enabled_features.has_gc() && string->StringEquals(v8_str(isolate, "arrayref"))) { type = i::wasm::kWasmArrayRef; + } else if (enabled_features.has_gc() && + string->StringEquals(v8_str(isolate, "i31ref"))) { + type = i::wasm::kWasmI31Ref; } else { - // TODO(7748): Add "i31ref". thrower.TypeError( "Descriptor property 'element' must be a WebAssembly reference type"); return; diff --git a/test/inspector/debugger/wasm-gc-anyref-expected.txt b/test/inspector/debugger/wasm-gc-anyref-expected.txt index 582a98053f..37585580b9 100644 --- a/test/inspector/debugger/wasm-gc-anyref-expected.txt +++ b/test/inspector/debugger/wasm-gc-anyref-expected.txt @@ -8,31 +8,32 @@ Module instantiated. Tables populated. 
Setting breakpoint { - columnNumber : 264 + columnNumber : 316 lineNumber : 0 scriptId : } Paused: -Script wasm://wasm/19fa3802 byte offset 264: Wasm opcode 0x01 (kExprNop) +Script wasm://wasm/2633f626 byte offset 316: Wasm opcode 0x01 (kExprNop) Scope: -at $main (0:264): +at $main (0:316): - scope (wasm-expression-stack): - stack: + stack: - scope (local): $anyref_local: Struct ((ref $type0)) $anyref_local2: Array ((ref $type1)) $anyref_local_i31: 30 (anyref) $anyref_local_null: null (anyref) - scope (module): - instance: exports: "exported_ref_table" (Table), "exported_func_table" (Table), "fill_tables" (Function), "main" (Function) + instance: exports: "exported_ref_table" (Table), "exported_func_table" (Table), "exported_i31_table" (Table), "fill_tables" (Function), "main" (Function) module: Module functions: "$my_func": (Function), "$fill_tables": (Function), "$main": (Function) globals: "$global0": function $my_func() { [native code] } (funcref) - tables: - $import.any_table: 0: Array(2) (anyref), 1: Struct ((ref $type0)), 2: null (anyref) + tables: + $import.any_table: 0: Array(2) (anyref), 1: 321 (anyref), 2: null (anyref), 3: null (anyref) $import.func_table: 0: function () { [native code] } (funcref), 1: function $my_func() { [native code] } (funcref), 2: null (funcref) $exported_ref_table: 0: Struct ((ref $type0)), 1: Array ((ref $type1)), 2: 30 (anyref), 3: null (anyref) $exported_func_table: 0: function external_fct() { [native code] } (funcref), 1: function $my_func() { [native code] } (funcref), 2: null (funcref) + $exported_i31_table: 0: 123456 (i31ref), 1: -123 (i31ref), 2: null (i31ref) at (anonymous) (0:17): - scope (global): -- skipped globals diff --git a/test/inspector/debugger/wasm-gc-anyref.js b/test/inspector/debugger/wasm-gc-anyref.js index 5ef0dc7a46..aa835e82e4 100644 --- a/test/inspector/debugger/wasm-gc-anyref.js +++ b/test/inspector/debugger/wasm-gc-anyref.js @@ -70,13 +70,15 @@ async function instantiateWasm() { let struct_type = 
builder.addStruct([makeField(kWasmI32, false)]); let array_type = builder.addArray(kWasmI32); let imported_ref_table = - builder.addImportedTable('import', 'any_table', 3, 3, kWasmAnyRef); + builder.addImportedTable('import', 'any_table', 4, 4, kWasmAnyRef); let imported_func_table = builder.addImportedTable('import', 'func_table', 3, 3, kWasmFuncRef); let ref_table = builder.addTable(kWasmAnyRef, 4) .exportAs('exported_ref_table'); let func_table = builder.addTable(kWasmFuncRef, 3) .exportAs('exported_func_table'); + let i31ref_table = builder.addTable(kWasmI31Ref, 3) + .exportAs('exported_i31_table'); let func = builder.addFunction('my_func', kSig_v_v).addBody([kExprNop]); // Make the function "declared". @@ -97,6 +99,10 @@ async function instantiateWasm() { ...wasmI32Const(123), kGCPrefix, kExprStructNew, struct_type, kExprTableSet, imported_ref_table, + ...wasmI32Const(1), + ...wasmI32Const(321), kGCPrefix, kExprI31New, + kExprTableSet, imported_ref_table, + // Fill imported func table. ...wasmI32Const(1), kExprRefFunc, func.index, @@ -106,6 +112,15 @@ async function instantiateWasm() { ...wasmI32Const(1), kExprRefFunc, func.index, kExprTableSet, func_table.index, + + // Fill i31 table. 
+ ...wasmI32Const(0), + ...wasmI32Const(123456), kGCPrefix, kExprI31New, + kExprTableSet, i31ref_table.index, + + ...wasmI32Const(1), + ...wasmI32Const(-123), kGCPrefix, kExprI31New, + kExprTableSet, i31ref_table.index, ]).exportFunc(); let body = [ @@ -137,7 +152,7 @@ async function instantiateWasm() { let imports = `{'import' : { 'any_table': (() => { let js_table = - new WebAssembly.Table({element: 'anyref', initial: 3, maximum: 3}); + new WebAssembly.Table({element: 'anyref', initial: 4, maximum: 4}); js_table.set(0, ['JavaScript', 'value']); return js_table; })(), diff --git a/test/mjsunit/wasm/reference-table-js-interop.js b/test/mjsunit/wasm/reference-table-js-interop.js index 7644278edf..29a7625e21 100644 --- a/test/mjsunit/wasm/reference-table-js-interop.js +++ b/test/mjsunit/wasm/reference-table-js-interop.js @@ -12,6 +12,7 @@ let tableTypes = { "eqref": kWasmEqRef, "structref": kWasmStructRef, "arrayref": kWasmArrayRef, + "i31ref": kWasmI31Ref, }; // Test table consistency check. @@ -115,11 +116,13 @@ for (let [typeName, type] of Object.entries(tableTypes)) { builder.addFunction("createI31", i31Sig) .addBody([kExprI32Const, 12, kGCPrefix, kExprI31New]) .exportFunc(); - let structSig = typeName != "arrayref" ? creatorSig : creatorAnySig; + let structSig = typeName != "arrayref" && typeName != "i31ref" + ? creatorSig : creatorAnySig; builder.addFunction("createStruct", structSig) .addBody([kExprI32Const, 12, kGCPrefix, kExprStructNew, struct]) .exportFunc(); - let arraySig = typeName != "structref" ? creatorSig : creatorAnySig; + let arraySig = typeName != "structref" && typeName != "i31ref" + ? creatorSig : creatorAnySig; builder.addFunction("createArray", arraySig) .addBody([ kExprI32Const, 12, @@ -158,7 +161,7 @@ for (let [typeName, type] of Object.entries(tableTypes)) { assertSame(table.get(2), table.get(3)); // The same smi. } // Set struct. 
- if (typeName != "arrayref") { + if (typeName != "arrayref" && typeName != "i31ref") { table.set(4, wasm.exported(wasm.createStruct)); assertSame(table.get(4), wasm.tableGet(4)); assertEquals(12, wasm.tableGetStructVal(4)); @@ -168,7 +171,7 @@ for (let [typeName, type] of Object.entries(tableTypes)) { assertNotSame(table.get(4), table.get(5)); } // Set array. - if (typeName != "structref") { + if (typeName != "structref" && typeName != "i31ref") { table.set(6, wasm.exported(wasm.createArray)); assertSame(table.get(6), wasm.tableGet(6)); assertEquals(12, wasm.tableGetArrayVal(6)); @@ -190,7 +193,7 @@ for (let [typeName, type] of Object.entries(tableTypes)) { assertEquals(largeString, table.get(9)); } - if (typeName != "arrayref") { + if (typeName != "arrayref" && typeName != "i31ref") { // Grow table with explicit value. table.grow(2, wasm.exported(wasm.createStruct)); assertEquals(12, wasm.tableGetStructVal(size)); @@ -205,6 +208,19 @@ for (let [typeName, type] of Object.entries(tableTypes)) { assertEquals("Grow using a string", wasm.tableGet(14)); assertEquals("Grow using a string", table.get(14)); } + if (typeName == "i31ref" || typeName == "anyref") { + table.set(0, 123); + assertEquals(123, table.get(0)); + table.set(1, -123); + assertEquals(-123, table.get(1)); + if (typeName == "i31ref") { + assertThrows(() => table.set(0, 1 << 31), TypeError); + } else { + // anyref can reference boxed numbers as well. + table.set(0, 1 << 31) + assertEquals(1 << 31, table.get(0)); + } + } // Set from JS with wrapped wasm value of incompatible type. 
let invalidValues = { @@ -212,6 +228,7 @@ for (let [typeName, type] of Object.entries(tableTypes)) { "eqref": [], "structref": ["I31", "Array"], "arrayref": ["I31", "Struct"], + "i31ref": ["Struct", "Array"], }; for (let invalidType of invalidValues[typeName]) { print(`Test invalid type ${invalidType} for ${typeName}`); diff --git a/test/mjsunit/wasm/reference-tables.js b/test/mjsunit/wasm/reference-tables.js index ed556ae5fa..884fe57dcf 100644 --- a/test/mjsunit/wasm/reference-tables.js +++ b/test/mjsunit/wasm/reference-tables.js @@ -322,6 +322,41 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js'); assertEquals(1, instance.exports.null_getter(2)); })(); +(function TestI31RefTable() { + print(arguments.callee.name); + let builder = new WasmModuleBuilder(); + + let table = builder.addTable(kWasmI31Ref, 4, 4); + builder.addActiveElementSegment( + table, wasmI32Const(0), + [[...wasmI32Const(10), kGCPrefix, kExprI31New], + [...wasmI32Const(-42), kGCPrefix, kExprI31New], + [kExprRefNull, kI31RefCode]], + kWasmI31Ref); + + builder.addFunction("i31GetI32", kSig_i_i) + .addBody([ + kExprLocalGet, 0, kExprTableGet, 0, + kGCPrefix, kExprI31GetS]) + .exportFunc(); + + builder.addFunction("i31GetNull", kSig_i_i) + .addBody([kExprLocalGet, 0, kExprTableGet, 0, kExprRefIsNull]) + .exportFunc(); + + let instance = builder.instantiate({}); + assertTrue(!!instance); + + assertEquals(0, instance.exports.i31GetNull(0)); + assertEquals(0, instance.exports.i31GetNull(1)); + assertEquals(1, instance.exports.i31GetNull(2)); + assertEquals(1, instance.exports.i31GetNull(3)); + assertEquals(10, instance.exports.i31GetI32(0)); + assertEquals(-42, instance.exports.i31GetI32(1)); + assertTraps(kTrapNullDereference, () => instance.exports.i31GetI32(2)); + assertTraps(kTrapNullDereference, () => instance.exports.i31GetI32(3)); +})(); + (function TestArrayRefTable() { print(arguments.callee.name); let builder = new WasmModuleBuilder(); From 004fbc01962cebd4fbb27d8c6be11f125138704b 
Mon Sep 17 00:00:00 2001 From: Victor Gomes Date: Wed, 21 Dec 2022 17:51:33 +0100 Subject: [PATCH 037/654] [maglev] Share LogicalNot implementation Bug: v8:7700 Change-Id: I67bc1ab26ec1065a6ede1ba2ada059d800adb93b Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4110807 Commit-Queue: Patrick Thier Reviewed-by: Patrick Thier Auto-Submit: Victor Gomes Cr-Commit-Position: refs/heads/main@{#84988} --- src/maglev/arm64/maglev-ir-arm64.cc | 1 - src/maglev/maglev-ir.cc | 26 ++++++++++++++++++++++++ src/maglev/x64/maglev-ir-x64.cc | 31 ----------------------------- 3 files changed, 26 insertions(+), 32 deletions(-) diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index 41019bb727..0263c683bd 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -121,7 +121,6 @@ UNIMPLEMENTED_NODE(LoadSignedIntTypedArrayElement, elements_kind_) UNIMPLEMENTED_NODE(LoadUnsignedIntTypedArrayElement, elements_kind_) UNIMPLEMENTED_NODE(LoadDoubleTypedArrayElement, elements_kind_) UNIMPLEMENTED_NODE(HoleyFloat64Box) -UNIMPLEMENTED_NODE(LogicalNot) UNIMPLEMENTED_NODE(SetPendingMessage) UNIMPLEMENTED_NODE(TestUndetectable) UNIMPLEMENTED_NODE(TestTypeOf, literal_) diff --git a/src/maglev/maglev-ir.cc b/src/maglev/maglev-ir.cc index 4732661286..45a5bb9916 100644 --- a/src/maglev/maglev-ir.cc +++ b/src/maglev/maglev-ir.cc @@ -1216,6 +1216,32 @@ void Abort::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { __ Trap(); } +void LogicalNot::SetValueLocationConstraints() { + UseAny(value()); + DefineAsRegister(this); +} +void LogicalNot::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + if (v8_flags.debug_code) { + // LogicalNot expects either TrueValue or FalseValue. 
+ Label next; + __ JumpIf(__ IsRootConstant(value(), RootIndex::kFalseValue), &next); + __ JumpIf(__ IsRootConstant(value(), RootIndex::kTrueValue), &next); + __ Abort(AbortReason::kUnexpectedValue); + __ bind(&next); + } + + Label return_false, done; + __ JumpIf(__ IsRootConstant(value(), RootIndex::kTrueValue), &return_false); + __ LoadRoot(ToRegister(result()), RootIndex::kTrueValue); + __ Jump(&done); + + __ bind(&return_false); + __ LoadRoot(ToRegister(result()), RootIndex::kFalseValue); + + __ bind(&done); +} + int LoadNamedGeneric::MaxCallStackArgs() const { return LoadWithVectorDescriptor::GetStackParameterCount(); } diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index 354d87fa08..ba9a7e9156 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -2156,37 +2156,6 @@ void CheckedTruncateNumberToInt32::GenerateCode(MaglevAssembler* masm, __ bind(&done); } -void LogicalNot::SetValueLocationConstraints() { - UseRegister(value()); - DefineAsRegister(this); -} -void LogicalNot::GenerateCode(MaglevAssembler* masm, - const ProcessingState& state) { - Register object = ToRegister(value()); - Register return_value = ToRegister(result()); - - if (v8_flags.debug_code) { - // LogicalNot expects either TrueValue or FalseValue. 
- Label next; - __ CompareRoot(object, RootIndex::kFalseValue); - __ j(equal, &next); - __ CompareRoot(object, RootIndex::kTrueValue); - __ Check(equal, AbortReason::kUnexpectedValue); - __ bind(&next); - } - - Label return_false, done; - __ CompareRoot(object, RootIndex::kTrueValue); - __ j(equal, &return_false, Label::kNear); - __ LoadRoot(return_value, RootIndex::kTrueValue); - __ jmp(&done, Label::kNear); - - __ bind(&return_false); - __ LoadRoot(return_value, RootIndex::kFalseValue); - - __ bind(&done); -} - void SetPendingMessage::SetValueLocationConstraints() { UseRegister(value()); set_temporaries_needed(1); From b844614174db395e8753ff0210b02403518dd23e Mon Sep 17 00:00:00 2001 From: Victor Gomes Date: Thu, 22 Dec 2022 10:44:51 +0100 Subject: [PATCH 038/654] [maglev][arm64] Add TestUndetectable and TestTypeOf Bug: v8:7700 Change-Id: I8212e14570481d76b26782afbd721f1884fae009 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4115753 Auto-Submit: Victor Gomes Commit-Queue: Patrick Thier Commit-Queue: Victor Gomes Reviewed-by: Patrick Thier Cr-Commit-Position: refs/heads/main@{#84989} --- src/maglev/arm64/maglev-ir-arm64.cc | 127 +++++++++++++++++++++++++++- 1 file changed, 125 insertions(+), 2 deletions(-) diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index 0263c683bd..4009251dc8 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -122,8 +122,6 @@ UNIMPLEMENTED_NODE(LoadUnsignedIntTypedArrayElement, elements_kind_) UNIMPLEMENTED_NODE(LoadDoubleTypedArrayElement, elements_kind_) UNIMPLEMENTED_NODE(HoleyFloat64Box) UNIMPLEMENTED_NODE(SetPendingMessage) -UNIMPLEMENTED_NODE(TestUndetectable) -UNIMPLEMENTED_NODE(TestTypeOf, literal_) UNIMPLEMENTED_NODE_WITH_CALL(ToObject) UNIMPLEMENTED_NODE_WITH_CALL(ToString) UNIMPLEMENTED_NODE(AssertInt32, condition_, reason_) @@ -1461,6 +1459,131 @@ void StringLength::GenerateCode(MaglevAssembler* masm, FieldMemOperand(object, 
String::kLengthOffset)); } +void TestUndetectable::SetValueLocationConstraints() { + UseRegister(value()); + DefineAsRegister(this); +} +void TestUndetectable::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register object = ToRegister(value()); + Register return_value = ToRegister(result()); + + Label return_false, done; + __ JumpIfSmi(object, &return_false); + { + // For heap objects, check the map's undetectable bit. + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + __ LoadMap(scratch, object); + __ Ldr(scratch.W(), FieldMemOperand(scratch, Map::kBitFieldOffset)); + __ TestAndBranchIfAllClear( + scratch.W(), Map::Bits1::IsUndetectableBit::kMask, &return_false); + } + + __ LoadRoot(return_value, RootIndex::kTrueValue); + __ B(&done); + + __ bind(&return_false); + __ LoadRoot(return_value, RootIndex::kFalseValue); + + __ bind(&done); +} + +void TestTypeOf::SetValueLocationConstraints() { + UseRegister(value()); + DefineAsRegister(this); +} +void TestTypeOf::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + using LiteralFlag = interpreter::TestTypeOfFlags::LiteralFlag; + Register object = ToRegister(value()); + // Use return register as temporary if needed. 
+ Register scratch = ToRegister(result()); + Label is_true, is_false, done; + switch (literal_) { + case LiteralFlag::kNumber: + __ JumpIfSmi(object, &is_true); + __ Ldr(scratch.W(), FieldMemOperand(object, HeapObject::kMapOffset)); + __ CompareRoot(scratch.W(), RootIndex::kHeapNumberMap); + __ B(ne, &is_false); + break; + case LiteralFlag::kString: + __ JumpIfSmi(object, &is_false); + __ LoadMap(scratch, object); + __ CompareInstanceTypeRange(scratch, scratch, FIRST_STRING_TYPE, + LAST_STRING_TYPE); + __ B(hi, &is_false); + break; + case LiteralFlag::kSymbol: + __ JumpIfSmi(object, &is_false); + __ LoadMap(scratch, object); + __ CompareInstanceType(scratch, scratch, SYMBOL_TYPE); + __ B(ne, &is_false); + break; + case LiteralFlag::kBoolean: + __ CompareRoot(object, RootIndex::kTrueValue); + __ B(eq, &is_true); + __ CompareRoot(object, RootIndex::kFalseValue); + __ B(ne, &is_false); + break; + case LiteralFlag::kBigInt: + __ JumpIfSmi(object, &is_false); + __ LoadMap(scratch, object); + __ CompareInstanceType(scratch, scratch, BIGINT_TYPE); + __ B(ne, &is_false); + break; + case LiteralFlag::kUndefined: + __ JumpIfSmi(object, &is_false); + // Check it has the undetectable bit set and it is not null. + __ LoadMap(scratch, object); + __ Ldr(scratch.W(), FieldMemOperand(scratch, Map::kBitFieldOffset)); + __ TestAndBranchIfAllClear( + scratch.W(), Map::Bits1::IsUndetectableBit::kMask, &is_false); + __ CompareRoot(object, RootIndex::kNullValue); + __ B(eq, &is_false); + break; + case LiteralFlag::kFunction: + __ JumpIfSmi(object, &is_false); + // Check if callable bit is set and not undetectable. 
+ __ LoadMap(scratch, object); + __ Ldr(scratch.W(), FieldMemOperand(scratch, Map::kBitFieldOffset)); + __ And(scratch.W(), scratch.W(), + Map::Bits1::IsUndetectableBit::kMask | + Map::Bits1::IsCallableBit::kMask); + __ Cmp(scratch.W(), Map::Bits1::IsCallableBit::kMask); + __ B(ne, &is_false); + break; + case LiteralFlag::kObject: + __ JumpIfSmi(object, &is_false); + // If the object is null then return true. + __ CompareRoot(object, RootIndex::kNullValue); + __ B(eq, &is_true); + // Check if the object is a receiver type, + __ LoadMap(scratch, object); + { + UseScratchRegisterScope temps(masm); + __ CompareInstanceType(scratch, temps.AcquireX(), + FIRST_JS_RECEIVER_TYPE); + } + __ B(lt, &is_false); + // ... and is not undefined (undetectable) nor callable. + __ Ldr(scratch.W(), FieldMemOperand(scratch, Map::kBitFieldOffset)); + __ TestAndBranchIfAnySet(scratch.W(), + Map::Bits1::IsUndetectableBit::kMask | + Map::Bits1::IsCallableBit::kMask, + &is_false); + break; + case LiteralFlag::kOther: + UNREACHABLE(); + } + __ bind(&is_true); + __ LoadRoot(ToRegister(result()), RootIndex::kTrueValue); + __ B(&done); + __ bind(&is_false); + __ LoadRoot(ToRegister(result()), RootIndex::kFalseValue); + __ bind(&done); +} + int ThrowIfNotSuperConstructor::MaxCallStackArgs() const { return 2; } void ThrowIfNotSuperConstructor::SetValueLocationConstraints() { UseRegister(constructor()); From b97130344dcb7b3c92c8ebbc6e71480542254031 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olivier=20Fl=C3=BCckiger?= Date: Thu, 22 Dec 2022 10:21:46 +0000 Subject: [PATCH 039/654] [static-roots] Avoid accessing uninitialized read only roots MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the future we want to be able to return statically known pointers of read only roots. To be able to do so we must ensure that heap initialization code does not rely on the fact that the root is not initialized yet and the accessor returns null. 
Instead we must explicitly test if the root is initialized. Bug: v8:13466 Change-Id: Id2b93388f499a58ff26ca5fb566b4538e00916e3 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4106452 Reviewed-by: Toon Verwaest Commit-Queue: Olivier Flückiger Reviewed-by: Jakob Linke Cr-Commit-Position: refs/heads/main@{#84990} --- src/heap/factory.cc | 4 ++-- src/heap/heap.cc | 16 ++++++++++------ src/objects/ordered-hash-table.cc | 2 +- src/roots/roots-inl.h | 13 ++++++++++++- src/roots/roots.cc | 3 ++- src/roots/roots.h | 4 ++++ 6 files changed, 31 insertions(+), 11 deletions(-) diff --git a/src/heap/factory.cc b/src/heap/factory.cc index 7b9de08f36..4b115eb041 100644 --- a/src/heap/factory.cc +++ b/src/heap/factory.cc @@ -641,8 +641,8 @@ Handle Factory::NewPropertyDescriptorObject() { Handle Factory::CreateCanonicalEmptySwissNameDictionary() { // This function is only supposed to be used to create the canonical empty // version and should not be used afterwards. - DCHECK_EQ(kNullAddress, ReadOnlyRoots(isolate()).at( - RootIndex::kEmptySwissPropertyDictionary)); + DCHECK(!ReadOnlyRoots(isolate()).is_initialized( + RootIndex::kEmptySwissPropertyDictionary)); ReadOnlyRoots roots(isolate()); diff --git a/src/heap/heap.cc b/src/heap/heap.cc index 3c172a09c0..f0ffe5f236 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -3157,6 +3157,8 @@ void CreateFillerObjectAtImpl(Heap* heap, Address addr, int size, if (size == kTaggedSize) { filler.set_map_after_allocation(roots.unchecked_one_pointer_filler_map(), SKIP_WRITE_BARRIER); + // Ensure the filler map is properly initialized. 
+ DCHECK(filler.map(heap->isolate()).IsMap()); } else if (size == 2 * kTaggedSize) { filler.set_map_after_allocation(roots.unchecked_two_pointer_filler_map(), SKIP_WRITE_BARRIER); @@ -3164,6 +3166,8 @@ void CreateFillerObjectAtImpl(Heap* heap, Address addr, int size, AtomicSlot slot(ObjectSlot(addr) + 1); *slot = static_cast(kClearedFreeMemoryValue); } + // Ensure the filler map is properly initialized. + DCHECK(filler.map(heap->isolate()).IsMap()); } else { DCHECK_GT(size, 2 * kTaggedSize); filler.set_map_after_allocation(roots.unchecked_free_space_map(), @@ -3173,13 +3177,13 @@ void CreateFillerObjectAtImpl(Heap* heap, Address addr, int size, MemsetTagged(ObjectSlot(addr) + 2, Object(kClearedFreeMemoryValue), (size / kTaggedSize) - 2); } - } - // At this point, we may be deserializing the heap from a snapshot, and - // none of the maps have been created yet and are nullptr. - DCHECK((filler.map_slot().contains_map_value(kNullAddress) && - !heap->deserialization_complete()) || - filler.map(heap->isolate()).IsMap()); + // During bootstrapping we need to create a free space object before its + // map is initialized. In this case we cannot access the map yet, as it + // might be null, or not set up properly yet. + DCHECK_IMPLIES(roots.is_initialized(RootIndex::kFreeSpaceMap), + filler.map(heap->isolate()).IsMap()); + } } #ifdef DEBUG diff --git a/src/objects/ordered-hash-table.cc b/src/objects/ordered-hash-table.cc index ee720e5608..ae1110c2f6 100644 --- a/src/objects/ordered-hash-table.cc +++ b/src/objects/ordered-hash-table.cc @@ -52,7 +52,7 @@ MaybeHandle OrderedHashTable::AllocateEmpty( // This is only supposed to be used to create the canonical empty versions // of each ordered structure, and should not be used afterwards. // Requires that the map has already been set up in the roots table. 
- DCHECK(ReadOnlyRoots(isolate).at(root_index) == kNullAddress); + DCHECK(!ReadOnlyRoots(isolate).is_initialized(root_index)); Handle backing_store = isolate->factory()->NewFixedArrayWithMap( Derived::GetMap(ReadOnlyRoots(isolate)), HashTableStartIndex(), diff --git a/src/roots/roots-inl.h b/src/roots/roots-inl.h index a9ee6578be..1b8ef87e1a 100644 --- a/src/roots/roots-inl.h +++ b/src/roots/roots-inl.h @@ -94,7 +94,12 @@ READ_ONLY_ROOT_LIST(ROOT_ACCESSOR) Address* ReadOnlyRoots::GetLocation(RootIndex root_index) const { size_t index = static_cast(root_index); DCHECK_LT(index, kEntriesCount); - return &read_only_roots_[index]; + Address* location = &read_only_roots_[index]; + // Filler objects must be created before the free space map is initialized. + // Bootstrapping is able to handle kNullAddress being returned here. + DCHECK_IMPLIES(*location == kNullAddress, + root_index == RootIndex::kFreeSpaceMap); + return location; } Address ReadOnlyRoots::first_name_for_protector() const { @@ -121,6 +126,12 @@ Address ReadOnlyRoots::at(RootIndex root_index) const { return *GetLocation(root_index); } +bool ReadOnlyRoots::is_initialized(RootIndex root_index) const { + size_t index = static_cast(root_index); + DCHECK_LT(index, kEntriesCount); + return read_only_roots_[index] != kNullAddress; +} + } // namespace internal } // namespace v8 diff --git a/src/roots/roots.cc b/src/roots/roots.cc index f280498a0d..a73b0e96e4 100644 --- a/src/roots/roots.cc +++ b/src/roots/roots.cc @@ -77,7 +77,8 @@ void ReadOnlyRoots::InitFromStaticRootsTable(Address cage_base) { for (auto element : StaticReadOnlyRootsPointerTable) { auto ptr = V8HeapCompressionScheme::DecompressTaggedPointer(cage_base, element); - *GetLocation(pos) = ptr; + DCHECK(!is_initialized(pos)); + read_only_roots_[static_cast(pos)] = ptr; ++pos; } DCHECK_EQ(static_cast(pos) - 1, RootIndex::kLastReadOnlyRoot); diff --git a/src/roots/roots.h b/src/roots/roots.h index b5c5196575..69b12db12a 100644 --- a/src/roots/roots.h +++ 
b/src/roots/roots.h @@ -639,6 +639,10 @@ class ReadOnlyRoots { // Get the address of a given read-only root index, without type checks. V8_INLINE Address at(RootIndex root_index) const; + // Check if a slot is initialized yet. Should only be necessary for code + // running during snapshot creation. + V8_INLINE bool is_initialized(RootIndex root_index) const; + // Iterate over all the read-only roots. This is not necessary for garbage // collection and is usually only performed as part of (de)serialization or // heap verification. From b247270178dcfffe9af4389dbb84d1643bfccea4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olivier=20Fl=C3=BCckiger?= Date: Thu, 22 Dec 2022 11:57:54 +0000 Subject: [PATCH 040/654] Reland "[static-roots] Enable static roots on supported configurations" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a reland of commit c04ca9cc63417d24455704cbee44eb60b79f7af2 Original change's description: > [static-roots] Enable static roots on supported configurations > > The static root values are not actually used yet. 
> > Bug: v8:13466 > Change-Id: I85fc99277c31e0dd4350a305040ab25456051046 > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4101880 > Reviewed-by: Toon Verwaest > Commit-Queue: Olivier Flückiger > Cr-Commit-Position: refs/heads/main@{#84850} Bug: v8:13466 Change-Id: Id65bb5b19df999dfe930a78993e4bf3343d9f996 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4111641 Auto-Submit: Olivier Flückiger Reviewed-by: Toon Verwaest Commit-Queue: Toon Verwaest Cr-Commit-Position: refs/heads/main@{#84991} --- BUILD.gn | 2 +- src/roots/static-roots.h | 753 ++++++++++++++++++++++++++++++- src/snapshot/static-roots-gen.cc | 3 + tools/v8heapconst.py | 474 +++++++++---------- 4 files changed, 992 insertions(+), 240 deletions(-) diff --git a/BUILD.gn b/BUILD.gn index 682dfddc4a..50165520c2 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -121,7 +121,7 @@ declare_args() { v8_enable_snapshot_native_code_counters = "" # Use pre-generated static root pointer values from static-roots.h. - v8_enable_static_roots = false + v8_enable_static_roots = "" # Enable code-generation-time checking of types in the CodeStubAssembler. v8_enable_verify_csa = false diff --git a/src/roots/static-roots.h b/src/roots/static-roots.h index eb4aebd879..eeced43597 100644 --- a/src/roots/static-roots.h +++ b/src/roots/static-roots.h @@ -2,17 +2,766 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +// This file is automatically generated by `tools/dev/gen-static-roots.py`. Do +// not edit manually. + #ifndef V8_ROOTS_STATIC_ROOTS_H_ #define V8_ROOTS_STATIC_ROOTS_H_ #include "src/common/globals.h" + #if V8_STATIC_ROOTS_BOOL +// Disabling Wasm or Intl invalidates the contents of static-roots.h. +// TODO(olivf): To support static roots for multiple build configurations we +// will need to generate target specific versions of this file. 
+static_assert(V8_ENABLE_WEBASSEMBLY); +static_assert(V8_INTL_SUPPORT); + namespace v8 { namespace internal { -// TODO(olivf, v8:13466): Enable and add static roots -constexpr static std::array StaticReadOnlyRootsPointerTable = {}; +constexpr static std::array StaticReadOnlyRootsPointerTable = { + 0x3235, // free_space_map + 0x38a5, // one_pointer_filler_map + 0x38cd, // two_pointer_filler_map + 0x7b19, // uninitialized_value + 0x22e1, // undefined_value + 0x22fd, // the_hole_value + 0x22c5, // null_value + 0x3f41, // true_value + 0x3f5d, // false_value + 0x543d, // empty_string + 0x2141, // meta_map + 0x31e5, // byte_array_map + 0x2169, // fixed_array_map + 0x21e1, // fixed_cow_array_map + 0x3995, // hash_table_map + 0x2b7d, // symbol_map + 0x2dad, // one_byte_string_map + 0x2f3d, // one_byte_internalized_string_map + 0x2ab5, // scope_info_map + 0x3d05, // shared_function_info_map + 0x3825, // code_map + 0x384d, // cell_map + 0x387d, // global_property_cell_map + 0x2ba5, // foreign_map + 0x2b55, // heap_number_map + 0x396d, // transition_array_map + 0x302d, // thin_one_byte_string_map + 0x2b2d, // feedback_vector_map + 0x3f19, // empty_scope_info + 0x22a9, // empty_fixed_array + 0x2aa5, // empty_descriptor_array + 0x7b51, // arguments_marker + 0x7bc9, // exception + 0x7b89, // termination_exception + 0x7be5, // optimized_out + 0x7c1d, // stale_register + 0x3bc5, // script_context_table_map + 0x2b05, // closure_feedback_cell_array_map + 0x31bd, // feedback_metadata_map + 0x3b9d, // array_list_map + 0x2d5d, // bigint_map + 0x3bed, // object_boilerplate_description_map + 0x320d, // bytecode_array_map + 0x3d7d, // code_data_container_map + 0x3c15, // coverage_info_map + 0x3195, // fixed_double_array_map + 0x3a85, // global_dictionary_map + 0x3945, // many_closures_cell_map + 0x2bcd, // mega_dom_handler_map + 0x2add, // module_info_map + 0x3a35, // name_dictionary_map + 0x38f5, // no_closures_cell_map + 0x3aad, // number_dictionary_map + 0x391d, // one_closure_cell_map 
+ 0x39bd, // ordered_hash_map_map + 0x39e5, // ordered_hash_set_map + 0x3afd, // name_to_index_hash_table_map + 0x3b25, // registered_symbol_table_map + 0x3a0d, // ordered_name_dictionary_map + 0x3cdd, // preparse_data_map + 0x325d, // property_array_map + 0x3c3d, // accessor_info_map + 0x3c65, // side_effect_call_handler_info_map + 0x3c8d, // side_effect_free_call_handler_info_map + 0x3cb5, // next_call_side_effect_free_call_handler_info_map + 0x3ad5, // simple_number_dictionary_map + 0x3285, // small_ordered_hash_map_map + 0x32ad, // small_ordered_hash_set_map + 0x32d5, // small_ordered_name_dictionary_map + 0x3d2d, // source_text_module_map + 0x3a5d, // swiss_name_dictionary_map + 0x3d55, // synthetic_module_map + 0x3da5, // wasm_api_function_ref_map + 0x3dcd, // wasm_capi_function_data_map + 0x3df5, // wasm_exported_function_data_map + 0x3e1d, // wasm_internal_function_map + 0x3e45, // wasm_js_function_data_map + 0x3e6d, // wasm_resume_data_map + 0x3e95, // wasm_type_info_map + 0x3ebd, // wasm_continuation_object_map + 0x2191, // weak_fixed_array_map + 0x21b9, // weak_array_list_map + 0x3b75, // ephemeron_hash_table_map + 0x3b4d, // embedder_data_array_map + 0x3ee5, // weak_cell_map + 0x2d85, // string_map + 0x2dfd, // cons_one_byte_string_map + 0x2dd5, // cons_string_map + 0x3005, // thin_string_map + 0x2e25, // sliced_string_map + 0x2e4d, // sliced_one_byte_string_map + 0x2e75, // external_string_map + 0x2e9d, // external_one_byte_string_map + 0x2ec5, // uncached_external_string_map + 0x2f15, // internalized_string_map + 0x2f65, // external_internalized_string_map + 0x2f8d, // external_one_byte_internalized_string_map + 0x2fb5, // uncached_external_internalized_string_map + 0x2fdd, // uncached_external_one_byte_internalized_string_map + 0x2eed, // uncached_external_one_byte_string_map + 0x307d, // shared_one_byte_string_map + 0x3055, // shared_string_map + 0x30cd, // shared_external_one_byte_string_map + 0x30a5, // shared_external_string_map + 0x311d, // 
shared_uncached_external_one_byte_string_map + 0x30f5, // shared_uncached_external_string_map + 0x316d, // shared_thin_one_byte_string_map + 0x3145, // shared_thin_string_map + 0x2231, // undefined_map + 0x2281, // the_hole_map + 0x2259, // null_map + 0x2bf5, // boolean_map + 0x2c1d, // uninitialized_map + 0x2c45, // arguments_marker_map + 0x2c6d, // exception_map + 0x2c95, // termination_exception_map + 0x2cbd, // optimized_out_map + 0x2ce5, // stale_register_map + 0x2d0d, // self_reference_marker_map + 0x2d35, // basic_block_counters_marker_map + 0x2a99, // empty_enum_cache + 0x3f81, // empty_property_array + 0x3f79, // empty_byte_array + 0x3f29, // empty_object_boilerplate_description + 0x3f35, // empty_array_boilerplate_description + 0x3f89, // empty_closure_feedback_cell_array + 0x820d, // empty_slow_element_dictionary + 0x8231, // empty_ordered_hash_map + 0x8245, // empty_ordered_hash_set + 0x829d, // empty_feedback_metadata + 0x81c9, // empty_property_dictionary + 0x8259, // empty_ordered_property_dictionary + 0x827d, // empty_swiss_property_dictionary + 0x3f91, // noop_interceptor_info + 0x3f0d, // empty_array_list + 0x22b1, // empty_weak_fixed_array + 0x22b9, // empty_weak_array_list + 0x3875, // invalid_prototype_validity_cell + 0x3fc5, // nan_value + 0x3fd1, // hole_nan_value + 0x3fdd, // infinity_value + 0x3fb9, // minus_zero_value + 0x3fe9, // minus_infinity_value + 0x3ff5, // max_safe_integer + 0x4001, // max_uint_32 + 0x400d, // smi_min_value + 0x4019, // smi_max_value_plus_one + 0x4035, // single_character_string_table + 0x7c55, // self_reference_marker + 0x7c95, // basic_block_counters_marker + 0x831d, // off_heap_trampoline_relocation_info + 0x22e1, // trampoline_trivial_code_data_container + 0x22e1, // trampoline_promise_rejection_code_data_container + 0x82a9, // global_this_binding_scope_info + 0x82c9, // empty_function_scope_info + 0x82ed, // native_scope_info + 0x8305, // shadow_realm_scope_info + 0x81f1, // empty_symbol_table + 0x4025, // 
hash_seed + 0x5449, // adoptText_string + 0x5461, // approximatelySign_string + 0x5481, // baseName_string + 0x5495, // accounting_string + 0x54ad, // breakType_string + 0x54c5, // calendars_string + 0x54dd, // cardinal_string + 0x54f1, // caseFirst_string + 0x5509, // ceil_string + 0x5519, // compare_string + 0x552d, // collation_string + 0x5545, // collations_string + 0x555d, // compact_string + 0x5571, // compactDisplay_string + 0x558d, // currency_string + 0x55a1, // currencyDisplay_string + 0x55bd, // currencySign_string + 0x55d5, // dateStyle_string + 0x55ed, // dateTimeField_string + 0x5609, // dayPeriod_string + 0x5621, // daysDisplay_string + 0x5639, // decimal_string + 0x564d, // dialect_string + 0x5661, // digital_string + 0x5675, // direction_string + 0x568d, // endRange_string + 0x56a1, // engineering_string + 0x56b9, // exceptZero_string + 0x56d1, // expand_string + 0x56e5, // exponentInteger_string + 0x5701, // exponentMinusSign_string + 0x5721, // exponentSeparator_string + 0x5741, // fallback_string + 0x5755, // first_string + 0x5769, // firstDay_string + 0x577d, // floor_string + 0x5791, // format_string + 0x57a5, // fraction_string + 0x57b9, // fractionalDigits_string + 0x57d5, // fractionalSecond_string + 0x57f1, // full_string + 0x5801, // granularity_string + 0x5819, // grapheme_string + 0x582d, // group_string + 0x5841, // h11_string + 0x5851, // h12_string + 0x5861, // h23_string + 0x5871, // h24_string + 0x5881, // halfCeil_string + 0x5895, // halfEven_string + 0x58a9, // halfExpand_string + 0x58c1, // halfFloor_string + 0x58d9, // halfTrunc_string + 0x58f1, // hour12_string + 0x5905, // hourCycle_string + 0x591d, // hourCycles_string + 0x5935, // hoursDisplay_string + 0x594d, // ideo_string + 0x595d, // ignorePunctuation_string + 0x597d, // Invalid_Date_string + 0x5995, // integer_string + 0x59a9, // isWordLike_string + 0x59c1, // kana_string + 0x59d1, // language_string + 0x59e5, // languageDisplay_string + 0x5a01, // lessPrecision_string 
+ 0x5a1d, // letter_string + 0x5a31, // list_string + 0x5a41, // literal_string + 0x5a55, // locale_string + 0x5a69, // loose_string + 0x5a7d, // lower_string + 0x5a91, // ltr_string + 0x5aa1, // maximumFractionDigits_string + 0x5ac5, // maximumSignificantDigits_string + 0x5ae9, // microsecondsDisplay_string + 0x5b09, // millisecondsDisplay_string + 0x5b29, // min2_string + 0x5b39, // minimalDays_string + 0x5b51, // minimumFractionDigits_string + 0x5b75, // minimumIntegerDigits_string + 0x5b95, // minimumSignificantDigits_string + 0x5bb9, // minus_0 + 0x5bc9, // minusSign_string + 0x5be1, // minutesDisplay_string + 0x5bfd, // monthsDisplay_string + 0x5c19, // morePrecision_string + 0x5c35, // nan_string + 0x5c45, // nanosecondsDisplay_string + 0x5c65, // narrowSymbol_string + 0x5c7d, // negative_string + 0x5c91, // never_string + 0x5ca5, // none_string + 0x5cb5, // notation_string + 0x5cc9, // normal_string + 0x5cdd, // numberingSystem_string + 0x5cf9, // numberingSystems_string + 0x5d15, // numeric_string + 0x5d29, // ordinal_string + 0x5d3d, // percentSign_string + 0x5d55, // plusSign_string + 0x5d69, // quarter_string + 0x5d7d, // region_string + 0x5d91, // relatedYear_string + 0x5da9, // roundingMode_string + 0x5dc1, // roundingPriority_string + 0x5ddd, // rtl_string + 0x5ded, // scientific_string + 0x5e05, // secondsDisplay_string + 0x5e21, // segment_string + 0x5e35, // SegmentIterator_string + 0x5e51, // Segments_string + 0x5e65, // sensitivity_string + 0x5e7d, // sep_string + 0x5e8d, // shared_string + 0x5ea1, // signDisplay_string + 0x5eb9, // standard_string + 0x5ecd, // startRange_string + 0x5ee5, // strict_string + 0x5ef9, // stripIfInteger_string + 0x5f15, // style_string + 0x5f29, // term_string + 0x5f39, // textInfo_string + 0x5f4d, // timeStyle_string + 0x5f65, // timeZones_string + 0x5f7d, // timeZoneName_string + 0x5f95, // trailingZeroDisplay_string + 0x5fb5, // trunc_string + 0x5fc9, // two_digit_string + 0x5fdd, // type_string + 0x5fed, // 
unknown_string + 0x6001, // upper_string + 0x6015, // usage_string + 0x6029, // useGrouping_string + 0x6041, // unitDisplay_string + 0x6059, // weekday_string + 0x606d, // weekend_string + 0x6081, // weeksDisplay_string + 0x6099, // weekInfo_string + 0x60ad, // yearName_string + 0x60c1, // yearsDisplay_string + 0x60d9, // add_string + 0x60e9, // AggregateError_string + 0x6105, // always_string + 0x6119, // anonymous_function_string + 0x6139, // anonymous_string + 0x6151, // apply_string + 0x6165, // Arguments_string + 0x617d, // arguments_string + 0x6195, // arguments_to_string + 0x61b5, // Array_string + 0x61c9, // array_to_string + 0x61e5, // ArrayBuffer_string + 0x61fd, // ArrayIterator_string + 0x6219, // as_string + 0x6229, // assert_string + 0x623d, // async_string + 0x6251, // AtomicsCondition_string + 0x6271, // AtomicsMutex_string + 0x628d, // auto_string + 0x629d, // await_string + 0x62b1, // BigInt_string + 0x62c5, // bigint_string + 0x62d9, // BigInt64Array_string + 0x62f5, // BigUint64Array_string + 0x6311, // bind_string + 0x6321, // blank_string + 0x6335, // Boolean_string + 0x6349, // boolean_string + 0x635d, // boolean_to_string + 0x6379, // bound__string + 0x638d, // buffer_string + 0x63a1, // byte_length_string + 0x63b9, // byte_offset_string + 0x63d1, // CompileError_string + 0x63e9, // calendar_string + 0x63fd, // callee_string + 0x6411, // caller_string + 0x6425, // cause_string + 0x6439, // character_string + 0x6451, // closure_string + 0x6469, // code_string + 0x6479, // column_string + 0x648d, // computed_string + 0x64a5, // configurable_string + 0x64bd, // conjunction_string + 0x64d5, // console_string + 0x64e9, // constrain_string + 0x6501, // construct_string + 0x6519, // current_string + 0x652d, // Date_string + 0x653d, // date_to_string + 0x6559, // dateAdd_string + 0x656d, // dateFromFields_string + 0x6589, // dateUntil_string + 0x65a1, // day_string + 0x65b1, // dayOfWeek_string + 0x65c9, // dayOfYear_string + 0x65e1, // days_string 
+ 0x65f1, // daysInMonth_string + 0x6609, // daysInWeek_string + 0x6621, // daysInYear_string + 0x6639, // default_string + 0x664d, // defineProperty_string + 0x6669, // deleteProperty_string + 0x6685, // disjunction_string + 0x669d, // done_string + 0x66ad, // dot_brand_string + 0x66c1, // dot_catch_string + 0x66d5, // dot_default_string + 0x66e9, // dot_for_string + 0x66f9, // dot_generator_object_string + 0x6719, // dot_home_object_string + 0x6731, // dot_new_target_string + 0x6749, // dot_result_string + 0x675d, // dot_repl_result_string + 0x6775, // dot_static_home_object_string + 0x471d, // dot_string + 0x6795, // dot_switch_tag_string + 0x67ad, // dotAll_string + 0x67c1, // Error_string + 0x67d5, // EvalError_string + 0x67ed, // enumerable_string + 0x6805, // element_string + 0x6819, // epochMicroseconds_string + 0x6839, // epochMilliseconds_string + 0x6859, // epochNanoseconds_string + 0x6875, // epochSeconds_string + 0x688d, // era_string + 0x689d, // eraYear_string + 0x68b1, // errors_string + 0x68c5, // error_to_string + 0x68e1, // eval_string + 0x68f1, // exception_string + 0x6909, // exec_string + 0x6919, // false_string + 0x692d, // fields_string + 0x6941, // FinalizationRegistry_string + 0x6961, // flags_string + 0x6975, // Float32Array_string + 0x698d, // Float64Array_string + 0x69a5, // fractionalSecondDigits_string + 0x69c9, // from_string + 0x69d9, // Function_string + 0x69ed, // function_native_code_string + 0x6a19, // function_string + 0x6a2d, // function_to_string + 0x6a4d, // Generator_string + 0x6a65, // get_space_string + 0x6a75, // get_string + 0x6a85, // getOffsetNanosecondsFor_string + 0x6aa9, // getOwnPropertyDescriptor_string + 0x6acd, // getPossibleInstantsFor_string + 0x6af1, // getPrototypeOf_string + 0x6b0d, // global_string + 0x6b21, // globalThis_string + 0x6b39, // groups_string + 0x6b4d, // growable_string + 0x6b61, // has_string + 0x6b71, // hasIndices_string + 0x6b89, // hour_string + 0x6b99, // hours_string + 0x6bad, // 
hoursInDay_string + 0x6bc5, // ignoreCase_string + 0x6bdd, // id_string + 0x6bed, // illegal_access_string + 0x6c09, // illegal_argument_string + 0x6c25, // inLeapYear_string + 0x6c3d, // index_string + 0x6c51, // indices_string + 0x6c65, // Infinity_string + 0x6c79, // infinity_string + 0x6c8d, // input_string + 0x6ca1, // Int16Array_string + 0x6cb9, // Int32Array_string + 0x6cd1, // Int8Array_string + 0x6ce9, // isExtensible_string + 0x6d01, // iso8601_string + 0x6d15, // isoDay_string + 0x6d29, // isoHour_string + 0x6d3d, // isoMicrosecond_string + 0x6d59, // isoMillisecond_string + 0x6d75, // isoMinute_string + 0x6d8d, // isoMonth_string + 0x6da1, // isoNanosecond_string + 0x6dbd, // isoSecond_string + 0x6dd5, // isoYear_string + 0x6de9, // jsMemoryEstimate_string + 0x6e05, // jsMemoryRange_string + 0x6e21, // keys_string + 0x6e31, // largestUnit_string + 0x6e49, // lastIndex_string + 0x6e61, // length_string + 0x6e75, // let_string + 0x6e85, // line_string + 0x6e95, // linear_string + 0x6ea9, // LinkError_string + 0x6ec1, // long_string + 0x6ed1, // Map_string + 0x6ee1, // MapIterator_string + 0x6ef9, // max_byte_length_string + 0x6f15, // medium_string + 0x6f29, // mergeFields_string + 0x6f41, // message_string + 0x6f55, // meta_string + 0x6f65, // minus_Infinity_string + 0x6f7d, // microsecond_string + 0x6f95, // microseconds_string + 0x6fad, // millisecond_string + 0x6fc5, // milliseconds_string + 0x6fdd, // minute_string + 0x6ff1, // minutes_string + 0x7005, // Module_string + 0x7019, // month_string + 0x702d, // monthDayFromFields_string + 0x704d, // months_string + 0x7061, // monthsInYear_string + 0x7079, // monthCode_string + 0x7091, // multiline_string + 0x70a9, // name_string + 0x70b9, // NaN_string + 0x70c9, // nanosecond_string + 0x70e1, // nanoseconds_string + 0x70f9, // narrow_string + 0x710d, // native_string + 0x6731, // new_target_string + 0x7121, // NFC_string + 0x7131, // NFD_string + 0x7141, // NFKC_string + 0x7151, // NFKD_string + 0x7161, 
// not_equal_string + 0x7179, // null_string + 0x7189, // null_to_string + 0x71a5, // Number_string + 0x71b9, // number_string + 0x71cd, // number_to_string + 0x71e9, // Object_string + 0x71fd, // object_string + 0x7211, // object_to_string + 0x722d, // of_string + 0x723d, // offset_string + 0x7251, // offsetNanoseconds_string + 0x7271, // ok_string + 0x474d, // one_string + 0x7281, // other_string + 0x7295, // overflow_string + 0x72a9, // ownKeys_string + 0x72bd, // percent_string + 0x72d1, // plainDate_string + 0x72e9, // plainTime_string + 0x7301, // position_string + 0x7315, // preventExtensions_string + 0x7335, // private_constructor_string + 0x734d, // Promise_string + 0x7361, // proto_string + 0x7379, // prototype_string + 0x7391, // proxy_string + 0x73a5, // Proxy_string + 0x73b9, // query_colon_string + 0x73c9, // RangeError_string + 0x73e1, // raw_json_string + 0x73f5, // raw_string + 0x7405, // ReferenceError_string + 0x7421, // ReflectGet_string + 0x7439, // ReflectHas_string + 0x7451, // RegExp_string + 0x7465, // regexp_to_string + 0x7481, // reject_string + 0x7495, // relativeTo_string + 0x74ad, // resizable_string + 0x74c5, // ResizableArrayBuffer_string + 0x74e5, // return_string + 0x74f9, // revoke_string + 0x750d, // roundingIncrement_string + 0x752d, // RuntimeError_string + 0x7545, // WebAssemblyException_string + 0x7569, // Script_string + 0x757d, // script_string + 0x7591, // second_string + 0x75a5, // seconds_string + 0x75b9, // short_string + 0x75cd, // Set_string + 0x75dd, // sentence_string + 0x75f1, // set_space_string + 0x7601, // set_string + 0x7611, // SetIterator_string + 0x7629, // setPrototypeOf_string + 0x7645, // ShadowRealm_string + 0x765d, // SharedArray_string + 0x7675, // SharedArrayBuffer_string + 0x7695, // SharedStruct_string + 0x76ad, // sign_string + 0x76bd, // smallestUnit_string + 0x76d5, // source_string + 0x76e9, // sourceText_string + 0x7701, // stack_string + 0x7715, // stackTraceLimit_string + 0x7731, // 
sticky_string + 0x7745, // String_string + 0x7759, // string_string + 0x776d, // string_to_string + 0x7789, // Symbol_iterator_string + 0x77a5, // symbol_species_string + 0x77c1, // Symbol_species_string + 0x77dd, // Symbol_string + 0x77f1, // symbol_string + 0x7805, // SyntaxError_string + 0x781d, // target_string + 0x7831, // this_function_string + 0x784d, // this_string + 0x785d, // throw_string + 0x7871, // timed_out_string + 0x7889, // timeZone_string + 0x789d, // toJSON_string + 0x78b1, // toString_string + 0x78c5, // true_string + 0x78d5, // total_string + 0x78e9, // TypeError_string + 0x7901, // Uint16Array_string + 0x7919, // Uint32Array_string + 0x7931, // Uint8Array_string + 0x7949, // Uint8ClampedArray_string + 0x7969, // undefined_string + 0x7981, // undefined_to_string + 0x79a1, // unicode_string + 0x79b5, // unicodeSets_string + 0x79cd, // unit_string + 0x79dd, // URIError_string + 0x79f1, // UTC_string + 0x7a01, // value_string + 0x7a15, // valueOf_string + 0x7a29, // WeakMap_string + 0x7a3d, // WeakRef_string + 0x7a51, // WeakSet_string + 0x7a65, // week_string + 0x7a75, // weeks_string + 0x7a89, // weekOfYear_string + 0x7aa1, // word_string + 0x7ab1, // writable_string + 0x7ac5, // yearMonthFromFields_string + 0x7ae5, // year_string + 0x7af5, // years_string + 0x473d, // zero_string + 0x7cd9, // array_buffer_wasm_memory_symbol + 0x7ce9, // call_site_info_symbol + 0x7cf9, // console_context_id_symbol + 0x7d09, // console_context_name_symbol + 0x7d19, // class_fields_symbol + 0x7d29, // class_positions_symbol + 0x7d39, // elements_transition_symbol + 0x7d49, // error_end_pos_symbol + 0x7d59, // error_script_symbol + 0x7d69, // error_stack_symbol + 0x7d79, // error_start_pos_symbol + 0x7d89, // frozen_symbol + 0x7d99, // interpreter_trampoline_symbol + 0x7da9, // mega_dom_symbol + 0x7db9, // megamorphic_symbol + 0x7dc9, // native_context_index_symbol + 0x7dd9, // nonextensible_symbol + 0x7de9, // not_mapped_symbol + 0x7df9, // 
promise_debug_marker_symbol + 0x7e09, // promise_debug_message_symbol + 0x7e19, // promise_forwarding_handler_symbol + 0x7e29, // promise_handled_by_symbol + 0x7e39, // promise_awaited_by_symbol + 0x7e49, // regexp_result_names_symbol + 0x7e59, // regexp_result_regexp_input_symbol + 0x7e69, // regexp_result_regexp_last_index_symbol + 0x7e79, // sealed_symbol + 0x7e89, // strict_function_transition_symbol + 0x7e99, // template_literal_function_literal_id_symbol + 0x7ea9, // template_literal_slot_id_symbol + 0x7eb9, // wasm_exception_tag_symbol + 0x7ec9, // wasm_exception_values_symbol + 0x7ed9, // wasm_uncatchable_symbol + 0x7ee9, // wasm_wrapped_object_symbol + 0x7ef9, // wasm_debug_proxy_cache_symbol + 0x7f09, // wasm_debug_proxy_names_symbol + 0x7f19, // uninitialized_symbol + 0x7f29, // async_iterator_symbol + 0x7f59, // intl_fallback_symbol + 0x7f91, // match_all_symbol + 0x7fbd, // match_symbol + 0x7fe5, // replace_symbol + 0x8011, // search_symbol + 0x803d, // split_symbol + 0x8065, // to_primitive_symbol + 0x8095, // unscopables_symbol + 0x80c5, // has_instance_symbol + 0x80f5, // to_string_tag_symbol + 0x2319, // promise_fulfill_reaction_job_task_map + 0x2341, // promise_reject_reaction_job_task_map + 0x2369, // callable_task_map + 0x2391, // callback_task_map + 0x23b9, // promise_resolve_thenable_job_task_map + 0x23e1, // function_template_info_map + 0x2409, // object_template_info_map + 0x2431, // access_check_info_map + 0x2459, // accessor_pair_map + 0x2481, // aliased_arguments_entry_map + 0x24a9, // allocation_memento_map + 0x24d1, // array_boilerplate_description_map + 0x24f9, // asm_wasm_data_map + 0x2521, // async_generator_request_map + 0x2549, // break_point_map + 0x2571, // break_point_info_map + 0x2599, // call_site_info_map + 0x25c1, // class_positions_map + 0x25e9, // debug_info_map + 0x2611, // enum_cache_map + 0x2639, // error_stack_data_map + 0x2661, // function_template_rare_data_map + 0x2689, // interceptor_info_map + 0x26b1, // 
interpreter_data_map + 0x26d9, // module_request_map + 0x2701, // promise_capability_map + 0x2729, // promise_on_stack_map + 0x2751, // promise_reaction_map + 0x2779, // property_descriptor_object_map + 0x27a1, // prototype_info_map + 0x27c9, // regexp_boilerplate_description_map + 0x27f1, // script_map + 0x2819, // script_or_module_map + 0x2841, // module_info_entry_map + 0x2869, // stack_frame_info_map + 0x2891, // template_object_description_map + 0x28b9, // tuple2_map + 0x28e1, // wasm_exception_tag_map + 0x2909, // wasm_indirect_function_table_map + 0x370d, // sloppy_arguments_elements_map + 0x2209, // descriptor_array_map + 0x3735, // strong_descriptor_array_map + 0x32fd, // uncompiled_data_without_preparse_data_map + 0x3325, // uncompiled_data_with_preparse_data_map + 0x334d, // uncompiled_data_without_preparse_data_with_job_map + 0x3375, // uncompiled_data_with_preparse_data_and_job_map + 0x339d, // on_heap_basic_block_profiler_data_map + 0x33c5, // turbofan_bitset_type_map + 0x33ed, // turbofan_union_type_map + 0x3415, // turbofan_range_type_map + 0x343d, // turbofan_heap_constant_type_map + 0x3465, // turbofan_other_number_constant_type_map + 0x348d, // turboshaft_word32type_map + 0x34b5, // turboshaft_word32range_type_map + 0x375d, // turboshaft_word32set_type_map + 0x34dd, // turboshaft_word64type_map + 0x3505, // turboshaft_word64range_type_map + 0x3785, // turboshaft_word64set_type_map + 0x352d, // turboshaft_float64type_map + 0x3555, // turboshaft_float64range_type_map + 0x37ad, // turboshaft_float64set_type_map + 0x357d, // internal_class_map + 0x35a5, // smi_pair_map + 0x35cd, // smi_box_map + 0x35f5, // exported_sub_class_base_map + 0x361d, // exported_sub_class_map + 0x3645, // abstract_internal_class_subclass1_map + 0x366d, // abstract_internal_class_subclass2_map + 0x37d5, // internal_class_with_smi_elements_map + 0x37fd, // internal_class_with_struct_elements_map + 0x3695, // exported_sub_class2_map + 0x36bd, // sort_state_map + 0x36e5, // 
wasm_string_view_iter_map + 0x2931, // allocation_site_map + 0x2959, // allocation_site_without_weaknext_map + 0x814d, // constructor_string + 0x8165, // next_string + 0x8175, // resolve_string + 0x8189, // then_string + 0x8199, // iterator_symbol + 0x81a9, // species_symbol + 0x81b9, // is_concat_spreadable_symbol + 0x2981, // load_handler1_map + 0x29a9, // load_handler2_map + 0x29d1, // load_handler3_map + 0x29f9, // store_handler0_map + 0x2a21, // store_handler1_map + 0x2a49, // store_handler2_map + 0x2a71, // store_handler3_map +}; } // namespace internal } // namespace v8 diff --git a/src/snapshot/static-roots-gen.cc b/src/snapshot/static-roots-gen.cc index 772b340b53..12d9265ad3 100644 --- a/src/snapshot/static-roots-gen.cc +++ b/src/snapshot/static-roots-gen.cc @@ -30,6 +30,9 @@ void StaticRootsTableGen::write(Isolate* isolate, const char* file) { "that can be\n" << "// found in the LICENSE file.\n" << "\n" + << "// This file is automatically generated by " + "`tools/dev/gen-static-roots.py`. Do\n// not edit manually.\n" + << "\n" << "#ifndef V8_ROOTS_STATIC_ROOTS_H_\n" << "#define V8_ROOTS_STATIC_ROOTS_H_\n" << "\n" diff --git a/tools/v8heapconst.py b/tools/v8heapconst.py index 19e3330fd5..5f65d0b450 100644 --- a/tools/v8heapconst.py +++ b/tools/v8heapconst.py @@ -295,250 +295,250 @@ INSTANCE_TYPES = { # List of known V8 maps. 
KNOWN_MAPS = { ("read_only_space", 0x02141): (255, "MetaMap"), - ("read_only_space", 0x02169): (131, "NullMap"), - ("read_only_space", 0x02191): (237, "StrongDescriptorArrayMap"), + ("read_only_space", 0x02169): (175, "FixedArrayMap"), + ("read_only_space", 0x02191): (240, "WeakFixedArrayMap"), ("read_only_space", 0x021b9): (273, "WeakArrayListMap"), - ("read_only_space", 0x021fd): (154, "EnumCacheMap"), - ("read_only_space", 0x02231): (175, "FixedArrayMap"), - ("read_only_space", 0x0227d): (8, "OneByteInternalizedStringMap"), - ("read_only_space", 0x022c9): (252, "FreeSpaceMap"), - ("read_only_space", 0x022f1): (251, "OnePointerFillerMap"), - ("read_only_space", 0x02319): (251, "TwoPointerFillerMap"), - ("read_only_space", 0x02341): (131, "UninitializedMap"), - ("read_only_space", 0x023b9): (131, "UndefinedMap"), - ("read_only_space", 0x023fd): (130, "HeapNumberMap"), - ("read_only_space", 0x02431): (131, "TheHoleMap"), - ("read_only_space", 0x02491): (131, "BooleanMap"), - ("read_only_space", 0x02535): (190, "ByteArrayMap"), - ("read_only_space", 0x0255d): (175, "FixedCOWArrayMap"), - ("read_only_space", 0x02585): (176, "HashTableMap"), - ("read_only_space", 0x025ad): (128, "SymbolMap"), - ("read_only_space", 0x025d5): (40, "OneByteStringMap"), - ("read_only_space", 0x025fd): (261, "ScopeInfoMap"), - ("read_only_space", 0x02625): (262, "SharedFunctionInfoMap"), - ("read_only_space", 0x0264d): (245, "CodeMap"), - ("read_only_space", 0x02675): (244, "CellMap"), - ("read_only_space", 0x0269d): (260, "GlobalPropertyCellMap"), - ("read_only_space", 0x026c5): (204, "ForeignMap"), - ("read_only_space", 0x026ed): (241, "TransitionArrayMap"), - ("read_only_space", 0x02715): (45, "ThinOneByteStringMap"), - ("read_only_space", 0x0273d): (250, "FeedbackVectorMap"), - ("read_only_space", 0x02775): (131, "ArgumentsMarkerMap"), - ("read_only_space", 0x027d5): (131, "ExceptionMap"), - ("read_only_space", 0x02831): (131, "TerminationExceptionMap"), - ("read_only_space", 0x02899): 
(131, "OptimizedOutMap"), - ("read_only_space", 0x028f9): (131, "StaleRegisterMap"), - ("read_only_space", 0x02959): (189, "ScriptContextTableMap"), - ("read_only_space", 0x02981): (187, "ClosureFeedbackCellArrayMap"), - ("read_only_space", 0x029a9): (249, "FeedbackMetadataArrayMap"), - ("read_only_space", 0x029d1): (175, "ArrayListMap"), - ("read_only_space", 0x029f9): (129, "BigIntMap"), - ("read_only_space", 0x02a21): (188, "ObjectBoilerplateDescriptionMap"), - ("read_only_space", 0x02a49): (191, "BytecodeArrayMap"), - ("read_only_space", 0x02a71): (246, "CodeDataContainerMap"), - ("read_only_space", 0x02a99): (247, "CoverageInfoMap"), - ("read_only_space", 0x02ac1): (192, "FixedDoubleArrayMap"), - ("read_only_space", 0x02ae9): (178, "GlobalDictionaryMap"), - ("read_only_space", 0x02b11): (156, "ManyClosuresCellMap"), - ("read_only_space", 0x02b39): (256, "MegaDomHandlerMap"), - ("read_only_space", 0x02b61): (175, "ModuleInfoMap"), - ("read_only_space", 0x02b89): (179, "NameDictionaryMap"), - ("read_only_space", 0x02bb1): (156, "NoClosuresCellMap"), - ("read_only_space", 0x02bd9): (181, "NumberDictionaryMap"), - ("read_only_space", 0x02c01): (156, "OneClosureCellMap"), - ("read_only_space", 0x02c29): (182, "OrderedHashMapMap"), - ("read_only_space", 0x02c51): (183, "OrderedHashSetMap"), - ("read_only_space", 0x02c79): (180, "NameToIndexHashTableMap"), - ("read_only_space", 0x02ca1): (185, "RegisteredSymbolTableMap"), - ("read_only_space", 0x02cc9): (184, "OrderedNameDictionaryMap"), - ("read_only_space", 0x02cf1): (258, "PreparseDataMap"), - ("read_only_space", 0x02d19): (259, "PropertyArrayMap"), - ("read_only_space", 0x02d41): (242, "AccessorInfoMap"), - ("read_only_space", 0x02d69): (243, "SideEffectCallHandlerInfoMap"), - ("read_only_space", 0x02d91): (243, "SideEffectFreeCallHandlerInfoMap"), - ("read_only_space", 0x02db9): (243, "NextCallSideEffectFreeCallHandlerInfoMap"), - ("read_only_space", 0x02de1): (186, "SimpleNumberDictionaryMap"), - 
("read_only_space", 0x02e09): (231, "SmallOrderedHashMapMap"), - ("read_only_space", 0x02e31): (232, "SmallOrderedHashSetMap"), - ("read_only_space", 0x02e59): (233, "SmallOrderedNameDictionaryMap"), - ("read_only_space", 0x02e81): (238, "SourceTextModuleMap"), - ("read_only_space", 0x02ea9): (266, "SwissNameDictionaryMap"), - ("read_only_space", 0x02ed1): (239, "SyntheticModuleMap"), - ("read_only_space", 0x02ef9): (267, "WasmApiFunctionRefMap"), - ("read_only_space", 0x02f21): (225, "WasmCapiFunctionDataMap"), - ("read_only_space", 0x02f49): (226, "WasmExportedFunctionDataMap"), - ("read_only_space", 0x02f71): (269, "WasmInternalFunctionMap"), - ("read_only_space", 0x02f99): (227, "WasmJSFunctionDataMap"), - ("read_only_space", 0x02fc1): (270, "WasmResumeDataMap"), - ("read_only_space", 0x02fe9): (272, "WasmTypeInfoMap"), - ("read_only_space", 0x03011): (268, "WasmContinuationObjectMap"), - ("read_only_space", 0x03039): (240, "WeakFixedArrayMap"), - ("read_only_space", 0x03061): (177, "EphemeronHashTableMap"), - ("read_only_space", 0x03089): (248, "EmbedderDataArrayMap"), - ("read_only_space", 0x030b1): (274, "WeakCellMap"), - ("read_only_space", 0x030d9): (32, "StringMap"), - ("read_only_space", 0x03101): (41, "ConsOneByteStringMap"), - ("read_only_space", 0x03129): (33, "ConsStringMap"), - ("read_only_space", 0x03151): (37, "ThinStringMap"), - ("read_only_space", 0x03179): (35, "SlicedStringMap"), - ("read_only_space", 0x031a1): (43, "SlicedOneByteStringMap"), - ("read_only_space", 0x031c9): (34, "ExternalStringMap"), - ("read_only_space", 0x031f1): (42, "ExternalOneByteStringMap"), - ("read_only_space", 0x03219): (50, "UncachedExternalStringMap"), - ("read_only_space", 0x03241): (0, "InternalizedStringMap"), - ("read_only_space", 0x03269): (2, "ExternalInternalizedStringMap"), - ("read_only_space", 0x03291): (10, "ExternalOneByteInternalizedStringMap"), - ("read_only_space", 0x032b9): (18, "UncachedExternalInternalizedStringMap"), - ("read_only_space", 
0x032e1): (26, "UncachedExternalOneByteInternalizedStringMap"), - ("read_only_space", 0x03309): (58, "UncachedExternalOneByteStringMap"), - ("read_only_space", 0x03331): (104, "SharedOneByteStringMap"), - ("read_only_space", 0x03359): (96, "SharedStringMap"), - ("read_only_space", 0x03381): (106, "SharedExternalOneByteStringMap"), - ("read_only_space", 0x033a9): (98, "SharedExternalStringMap"), - ("read_only_space", 0x033d1): (122, "SharedUncachedExternalOneByteStringMap"), - ("read_only_space", 0x033f9): (114, "SharedUncachedExternalStringMap"), - ("read_only_space", 0x03421): (109, "SharedThinOneByteStringMap"), - ("read_only_space", 0x03449): (101, "SharedThinStringMap"), - ("read_only_space", 0x03471): (131, "SelfReferenceMarkerMap"), - ("read_only_space", 0x03499): (131, "BasicBlockCountersMarkerMap"), - ("read_only_space", 0x034dd): (146, "ArrayBoilerplateDescriptionMap"), - ("read_only_space", 0x035dd): (158, "InterceptorInfoMap"), - ("read_only_space", 0x07655): (132, "PromiseFulfillReactionJobTaskMap"), - ("read_only_space", 0x0767d): (133, "PromiseRejectReactionJobTaskMap"), - ("read_only_space", 0x076a5): (134, "CallableTaskMap"), - ("read_only_space", 0x076cd): (135, "CallbackTaskMap"), - ("read_only_space", 0x076f5): (136, "PromiseResolveThenableJobTaskMap"), - ("read_only_space", 0x0771d): (139, "FunctionTemplateInfoMap"), - ("read_only_space", 0x07745): (140, "ObjectTemplateInfoMap"), - ("read_only_space", 0x0776d): (141, "AccessCheckInfoMap"), - ("read_only_space", 0x07795): (142, "AccessorPairMap"), - ("read_only_space", 0x077bd): (143, "AliasedArgumentsEntryMap"), - ("read_only_space", 0x077e5): (144, "AllocationMementoMap"), - ("read_only_space", 0x0780d): (147, "AsmWasmDataMap"), - ("read_only_space", 0x07835): (148, "AsyncGeneratorRequestMap"), - ("read_only_space", 0x0785d): (149, "BreakPointMap"), - ("read_only_space", 0x07885): (150, "BreakPointInfoMap"), - ("read_only_space", 0x078ad): (151, "CallSiteInfoMap"), - ("read_only_space", 
0x078d5): (152, "ClassPositionsMap"), - ("read_only_space", 0x078fd): (153, "DebugInfoMap"), - ("read_only_space", 0x07925): (155, "ErrorStackDataMap"), - ("read_only_space", 0x0794d): (157, "FunctionTemplateRareDataMap"), - ("read_only_space", 0x07975): (159, "InterpreterDataMap"), - ("read_only_space", 0x0799d): (160, "ModuleRequestMap"), - ("read_only_space", 0x079c5): (161, "PromiseCapabilityMap"), - ("read_only_space", 0x079ed): (162, "PromiseOnStackMap"), - ("read_only_space", 0x07a15): (163, "PromiseReactionMap"), - ("read_only_space", 0x07a3d): (164, "PropertyDescriptorObjectMap"), - ("read_only_space", 0x07a65): (165, "PrototypeInfoMap"), - ("read_only_space", 0x07a8d): (166, "RegExpBoilerplateDescriptionMap"), - ("read_only_space", 0x07ab5): (167, "ScriptMap"), - ("read_only_space", 0x07add): (168, "ScriptOrModuleMap"), - ("read_only_space", 0x07b05): (169, "SourceTextModuleInfoEntryMap"), - ("read_only_space", 0x07b2d): (170, "StackFrameInfoMap"), - ("read_only_space", 0x07b55): (171, "TemplateObjectDescriptionMap"), - ("read_only_space", 0x07b7d): (172, "Tuple2Map"), - ("read_only_space", 0x07ba5): (173, "WasmExceptionTagMap"), - ("read_only_space", 0x07bcd): (174, "WasmIndirectFunctionTableMap"), - ("read_only_space", 0x07bf5): (194, "SloppyArgumentsElementsMap"), - ("read_only_space", 0x07c1d): (236, "DescriptorArrayMap"), - ("read_only_space", 0x07c45): (222, "UncompiledDataWithoutPreparseDataMap"), - ("read_only_space", 0x07c6d): (220, "UncompiledDataWithPreparseDataMap"), - ("read_only_space", 0x07c95): (223, "UncompiledDataWithoutPreparseDataWithJobMap"), - ("read_only_space", 0x07cbd): (221, "UncompiledDataWithPreparseDataAndJobMap"), - ("read_only_space", 0x07ce5): (257, "OnHeapBasicBlockProfilerDataMap"), - ("read_only_space", 0x07d0d): (215, "TurbofanBitsetTypeMap"), - ("read_only_space", 0x07d35): (219, "TurbofanUnionTypeMap"), - ("read_only_space", 0x07d5d): (218, "TurbofanRangeTypeMap"), - ("read_only_space", 0x07d85): (216, 
"TurbofanHeapConstantTypeMap"), - ("read_only_space", 0x07dad): (217, "TurbofanOtherNumberConstantTypeMap"), - ("read_only_space", 0x07dd5): (198, "TurboshaftWord32TypeMap"), - ("read_only_space", 0x07dfd): (199, "TurboshaftWord32RangeTypeMap"), - ("read_only_space", 0x07e25): (200, "TurboshaftWord32SetTypeMap"), - ("read_only_space", 0x07e4d): (201, "TurboshaftWord64TypeMap"), - ("read_only_space", 0x07e75): (202, "TurboshaftWord64RangeTypeMap"), - ("read_only_space", 0x07e9d): (203, "TurboshaftWord64SetTypeMap"), - ("read_only_space", 0x07ec5): (195, "TurboshaftFloat64TypeMap"), - ("read_only_space", 0x07eed): (196, "TurboshaftFloat64RangeTypeMap"), - ("read_only_space", 0x07f15): (197, "TurboshaftFloat64SetTypeMap"), - ("read_only_space", 0x07f3d): (253, "InternalClassMap"), - ("read_only_space", 0x07f65): (264, "SmiPairMap"), - ("read_only_space", 0x07f8d): (263, "SmiBoxMap"), - ("read_only_space", 0x07fb5): (228, "ExportedSubClassBaseMap"), - ("read_only_space", 0x07fdd): (229, "ExportedSubClassMap"), - ("read_only_space", 0x08005): (234, "AbstractInternalClassSubclass1Map"), - ("read_only_space", 0x0802d): (235, "AbstractInternalClassSubclass2Map"), - ("read_only_space", 0x08055): (193, "InternalClassWithSmiElementsMap"), - ("read_only_space", 0x0807d): (254, "InternalClassWithStructElementsMap"), - ("read_only_space", 0x080a5): (230, "ExportedSubClass2Map"), - ("read_only_space", 0x080cd): (265, "SortStateMap"), - ("read_only_space", 0x080f5): (271, "WasmStringViewIterMap"), - ("read_only_space", 0x0811d): (145, "AllocationSiteWithWeakNextMap"), - ("read_only_space", 0x08145): (145, "AllocationSiteWithoutWeakNextMap"), - ("read_only_space", 0x08211): (137, "LoadHandler1Map"), - ("read_only_space", 0x08239): (137, "LoadHandler2Map"), - ("read_only_space", 0x08261): (137, "LoadHandler3Map"), - ("read_only_space", 0x08289): (138, "StoreHandler0Map"), - ("read_only_space", 0x082b1): (138, "StoreHandler1Map"), - ("read_only_space", 0x082d9): (138, 
"StoreHandler2Map"), - ("read_only_space", 0x08301): (138, "StoreHandler3Map"), + ("read_only_space", 0x021e1): (175, "FixedCOWArrayMap"), + ("read_only_space", 0x02209): (236, "DescriptorArrayMap"), + ("read_only_space", 0x02231): (131, "UndefinedMap"), + ("read_only_space", 0x02259): (131, "NullMap"), + ("read_only_space", 0x02281): (131, "TheHoleMap"), + ("read_only_space", 0x02319): (132, "PromiseFulfillReactionJobTaskMap"), + ("read_only_space", 0x02341): (133, "PromiseRejectReactionJobTaskMap"), + ("read_only_space", 0x02369): (134, "CallableTaskMap"), + ("read_only_space", 0x02391): (135, "CallbackTaskMap"), + ("read_only_space", 0x023b9): (136, "PromiseResolveThenableJobTaskMap"), + ("read_only_space", 0x023e1): (139, "FunctionTemplateInfoMap"), + ("read_only_space", 0x02409): (140, "ObjectTemplateInfoMap"), + ("read_only_space", 0x02431): (141, "AccessCheckInfoMap"), + ("read_only_space", 0x02459): (142, "AccessorPairMap"), + ("read_only_space", 0x02481): (143, "AliasedArgumentsEntryMap"), + ("read_only_space", 0x024a9): (144, "AllocationMementoMap"), + ("read_only_space", 0x024d1): (146, "ArrayBoilerplateDescriptionMap"), + ("read_only_space", 0x024f9): (147, "AsmWasmDataMap"), + ("read_only_space", 0x02521): (148, "AsyncGeneratorRequestMap"), + ("read_only_space", 0x02549): (149, "BreakPointMap"), + ("read_only_space", 0x02571): (150, "BreakPointInfoMap"), + ("read_only_space", 0x02599): (151, "CallSiteInfoMap"), + ("read_only_space", 0x025c1): (152, "ClassPositionsMap"), + ("read_only_space", 0x025e9): (153, "DebugInfoMap"), + ("read_only_space", 0x02611): (154, "EnumCacheMap"), + ("read_only_space", 0x02639): (155, "ErrorStackDataMap"), + ("read_only_space", 0x02661): (157, "FunctionTemplateRareDataMap"), + ("read_only_space", 0x02689): (158, "InterceptorInfoMap"), + ("read_only_space", 0x026b1): (159, "InterpreterDataMap"), + ("read_only_space", 0x026d9): (160, "ModuleRequestMap"), + ("read_only_space", 0x02701): (161, "PromiseCapabilityMap"), + 
("read_only_space", 0x02729): (162, "PromiseOnStackMap"), + ("read_only_space", 0x02751): (163, "PromiseReactionMap"), + ("read_only_space", 0x02779): (164, "PropertyDescriptorObjectMap"), + ("read_only_space", 0x027a1): (165, "PrototypeInfoMap"), + ("read_only_space", 0x027c9): (166, "RegExpBoilerplateDescriptionMap"), + ("read_only_space", 0x027f1): (167, "ScriptMap"), + ("read_only_space", 0x02819): (168, "ScriptOrModuleMap"), + ("read_only_space", 0x02841): (169, "SourceTextModuleInfoEntryMap"), + ("read_only_space", 0x02869): (170, "StackFrameInfoMap"), + ("read_only_space", 0x02891): (171, "TemplateObjectDescriptionMap"), + ("read_only_space", 0x028b9): (172, "Tuple2Map"), + ("read_only_space", 0x028e1): (173, "WasmExceptionTagMap"), + ("read_only_space", 0x02909): (174, "WasmIndirectFunctionTableMap"), + ("read_only_space", 0x02931): (145, "AllocationSiteWithWeakNextMap"), + ("read_only_space", 0x02959): (145, "AllocationSiteWithoutWeakNextMap"), + ("read_only_space", 0x02981): (137, "LoadHandler1Map"), + ("read_only_space", 0x029a9): (137, "LoadHandler2Map"), + ("read_only_space", 0x029d1): (137, "LoadHandler3Map"), + ("read_only_space", 0x029f9): (138, "StoreHandler0Map"), + ("read_only_space", 0x02a21): (138, "StoreHandler1Map"), + ("read_only_space", 0x02a49): (138, "StoreHandler2Map"), + ("read_only_space", 0x02a71): (138, "StoreHandler3Map"), + ("read_only_space", 0x02ab5): (261, "ScopeInfoMap"), + ("read_only_space", 0x02add): (175, "ModuleInfoMap"), + ("read_only_space", 0x02b05): (187, "ClosureFeedbackCellArrayMap"), + ("read_only_space", 0x02b2d): (250, "FeedbackVectorMap"), + ("read_only_space", 0x02b55): (130, "HeapNumberMap"), + ("read_only_space", 0x02b7d): (128, "SymbolMap"), + ("read_only_space", 0x02ba5): (204, "ForeignMap"), + ("read_only_space", 0x02bcd): (256, "MegaDomHandlerMap"), + ("read_only_space", 0x02bf5): (131, "BooleanMap"), + ("read_only_space", 0x02c1d): (131, "UninitializedMap"), + ("read_only_space", 0x02c45): (131, 
"ArgumentsMarkerMap"), + ("read_only_space", 0x02c6d): (131, "ExceptionMap"), + ("read_only_space", 0x02c95): (131, "TerminationExceptionMap"), + ("read_only_space", 0x02cbd): (131, "OptimizedOutMap"), + ("read_only_space", 0x02ce5): (131, "StaleRegisterMap"), + ("read_only_space", 0x02d0d): (131, "SelfReferenceMarkerMap"), + ("read_only_space", 0x02d35): (131, "BasicBlockCountersMarkerMap"), + ("read_only_space", 0x02d5d): (129, "BigIntMap"), + ("read_only_space", 0x02d85): (32, "StringMap"), + ("read_only_space", 0x02dad): (40, "OneByteStringMap"), + ("read_only_space", 0x02dd5): (33, "ConsStringMap"), + ("read_only_space", 0x02dfd): (41, "ConsOneByteStringMap"), + ("read_only_space", 0x02e25): (35, "SlicedStringMap"), + ("read_only_space", 0x02e4d): (43, "SlicedOneByteStringMap"), + ("read_only_space", 0x02e75): (34, "ExternalStringMap"), + ("read_only_space", 0x02e9d): (42, "ExternalOneByteStringMap"), + ("read_only_space", 0x02ec5): (50, "UncachedExternalStringMap"), + ("read_only_space", 0x02eed): (58, "UncachedExternalOneByteStringMap"), + ("read_only_space", 0x02f15): (0, "InternalizedStringMap"), + ("read_only_space", 0x02f3d): (8, "OneByteInternalizedStringMap"), + ("read_only_space", 0x02f65): (2, "ExternalInternalizedStringMap"), + ("read_only_space", 0x02f8d): (10, "ExternalOneByteInternalizedStringMap"), + ("read_only_space", 0x02fb5): (18, "UncachedExternalInternalizedStringMap"), + ("read_only_space", 0x02fdd): (26, "UncachedExternalOneByteInternalizedStringMap"), + ("read_only_space", 0x03005): (37, "ThinStringMap"), + ("read_only_space", 0x0302d): (45, "ThinOneByteStringMap"), + ("read_only_space", 0x03055): (96, "SharedStringMap"), + ("read_only_space", 0x0307d): (104, "SharedOneByteStringMap"), + ("read_only_space", 0x030a5): (98, "SharedExternalStringMap"), + ("read_only_space", 0x030cd): (106, "SharedExternalOneByteStringMap"), + ("read_only_space", 0x030f5): (114, "SharedUncachedExternalStringMap"), + ("read_only_space", 0x0311d): (122, 
"SharedUncachedExternalOneByteStringMap"), + ("read_only_space", 0x03145): (101, "SharedThinStringMap"), + ("read_only_space", 0x0316d): (109, "SharedThinOneByteStringMap"), + ("read_only_space", 0x03195): (192, "FixedDoubleArrayMap"), + ("read_only_space", 0x031bd): (249, "FeedbackMetadataArrayMap"), + ("read_only_space", 0x031e5): (190, "ByteArrayMap"), + ("read_only_space", 0x0320d): (191, "BytecodeArrayMap"), + ("read_only_space", 0x03235): (252, "FreeSpaceMap"), + ("read_only_space", 0x0325d): (259, "PropertyArrayMap"), + ("read_only_space", 0x03285): (231, "SmallOrderedHashMapMap"), + ("read_only_space", 0x032ad): (232, "SmallOrderedHashSetMap"), + ("read_only_space", 0x032d5): (233, "SmallOrderedNameDictionaryMap"), + ("read_only_space", 0x032fd): (222, "UncompiledDataWithoutPreparseDataMap"), + ("read_only_space", 0x03325): (220, "UncompiledDataWithPreparseDataMap"), + ("read_only_space", 0x0334d): (223, "UncompiledDataWithoutPreparseDataWithJobMap"), + ("read_only_space", 0x03375): (221, "UncompiledDataWithPreparseDataAndJobMap"), + ("read_only_space", 0x0339d): (257, "OnHeapBasicBlockProfilerDataMap"), + ("read_only_space", 0x033c5): (215, "TurbofanBitsetTypeMap"), + ("read_only_space", 0x033ed): (219, "TurbofanUnionTypeMap"), + ("read_only_space", 0x03415): (218, "TurbofanRangeTypeMap"), + ("read_only_space", 0x0343d): (216, "TurbofanHeapConstantTypeMap"), + ("read_only_space", 0x03465): (217, "TurbofanOtherNumberConstantTypeMap"), + ("read_only_space", 0x0348d): (198, "TurboshaftWord32TypeMap"), + ("read_only_space", 0x034b5): (199, "TurboshaftWord32RangeTypeMap"), + ("read_only_space", 0x034dd): (201, "TurboshaftWord64TypeMap"), + ("read_only_space", 0x03505): (202, "TurboshaftWord64RangeTypeMap"), + ("read_only_space", 0x0352d): (195, "TurboshaftFloat64TypeMap"), + ("read_only_space", 0x03555): (196, "TurboshaftFloat64RangeTypeMap"), + ("read_only_space", 0x0357d): (253, "InternalClassMap"), + ("read_only_space", 0x035a5): (264, "SmiPairMap"), + 
("read_only_space", 0x035cd): (263, "SmiBoxMap"), + ("read_only_space", 0x035f5): (228, "ExportedSubClassBaseMap"), + ("read_only_space", 0x0361d): (229, "ExportedSubClassMap"), + ("read_only_space", 0x03645): (234, "AbstractInternalClassSubclass1Map"), + ("read_only_space", 0x0366d): (235, "AbstractInternalClassSubclass2Map"), + ("read_only_space", 0x03695): (230, "ExportedSubClass2Map"), + ("read_only_space", 0x036bd): (265, "SortStateMap"), + ("read_only_space", 0x036e5): (271, "WasmStringViewIterMap"), + ("read_only_space", 0x0370d): (194, "SloppyArgumentsElementsMap"), + ("read_only_space", 0x03735): (237, "StrongDescriptorArrayMap"), + ("read_only_space", 0x0375d): (200, "TurboshaftWord32SetTypeMap"), + ("read_only_space", 0x03785): (203, "TurboshaftWord64SetTypeMap"), + ("read_only_space", 0x037ad): (197, "TurboshaftFloat64SetTypeMap"), + ("read_only_space", 0x037d5): (193, "InternalClassWithSmiElementsMap"), + ("read_only_space", 0x037fd): (254, "InternalClassWithStructElementsMap"), + ("read_only_space", 0x03825): (245, "CodeMap"), + ("read_only_space", 0x0384d): (244, "CellMap"), + ("read_only_space", 0x0387d): (260, "GlobalPropertyCellMap"), + ("read_only_space", 0x038a5): (251, "OnePointerFillerMap"), + ("read_only_space", 0x038cd): (251, "TwoPointerFillerMap"), + ("read_only_space", 0x038f5): (156, "NoClosuresCellMap"), + ("read_only_space", 0x0391d): (156, "OneClosureCellMap"), + ("read_only_space", 0x03945): (156, "ManyClosuresCellMap"), + ("read_only_space", 0x0396d): (241, "TransitionArrayMap"), + ("read_only_space", 0x03995): (176, "HashTableMap"), + ("read_only_space", 0x039bd): (182, "OrderedHashMapMap"), + ("read_only_space", 0x039e5): (183, "OrderedHashSetMap"), + ("read_only_space", 0x03a0d): (184, "OrderedNameDictionaryMap"), + ("read_only_space", 0x03a35): (179, "NameDictionaryMap"), + ("read_only_space", 0x03a5d): (266, "SwissNameDictionaryMap"), + ("read_only_space", 0x03a85): (178, "GlobalDictionaryMap"), + ("read_only_space", 0x03aad): 
(181, "NumberDictionaryMap"), + ("read_only_space", 0x03ad5): (186, "SimpleNumberDictionaryMap"), + ("read_only_space", 0x03afd): (180, "NameToIndexHashTableMap"), + ("read_only_space", 0x03b25): (185, "RegisteredSymbolTableMap"), + ("read_only_space", 0x03b4d): (248, "EmbedderDataArrayMap"), + ("read_only_space", 0x03b75): (177, "EphemeronHashTableMap"), + ("read_only_space", 0x03b9d): (175, "ArrayListMap"), + ("read_only_space", 0x03bc5): (189, "ScriptContextTableMap"), + ("read_only_space", 0x03bed): (188, "ObjectBoilerplateDescriptionMap"), + ("read_only_space", 0x03c15): (247, "CoverageInfoMap"), + ("read_only_space", 0x03c3d): (242, "AccessorInfoMap"), + ("read_only_space", 0x03c65): (243, "SideEffectCallHandlerInfoMap"), + ("read_only_space", 0x03c8d): (243, "SideEffectFreeCallHandlerInfoMap"), + ("read_only_space", 0x03cb5): (243, "NextCallSideEffectFreeCallHandlerInfoMap"), + ("read_only_space", 0x03cdd): (258, "PreparseDataMap"), + ("read_only_space", 0x03d05): (262, "SharedFunctionInfoMap"), + ("read_only_space", 0x03d2d): (238, "SourceTextModuleMap"), + ("read_only_space", 0x03d55): (239, "SyntheticModuleMap"), + ("read_only_space", 0x03d7d): (246, "CodeDataContainerMap"), + ("read_only_space", 0x03da5): (267, "WasmApiFunctionRefMap"), + ("read_only_space", 0x03dcd): (225, "WasmCapiFunctionDataMap"), + ("read_only_space", 0x03df5): (226, "WasmExportedFunctionDataMap"), + ("read_only_space", 0x03e1d): (269, "WasmInternalFunctionMap"), + ("read_only_space", 0x03e45): (227, "WasmJSFunctionDataMap"), + ("read_only_space", 0x03e6d): (270, "WasmResumeDataMap"), + ("read_only_space", 0x03e95): (272, "WasmTypeInfoMap"), + ("read_only_space", 0x03ebd): (268, "WasmContinuationObjectMap"), + ("read_only_space", 0x03ee5): (274, "WeakCellMap"), ("old_space", 0x0438d): (2116, "ExternalMap"), ("old_space", 0x043b5): (2120, "JSMessageObjectMap"), } # List of known V8 objects. 
KNOWN_OBJECTS = { - ("read_only_space", 0x021e1): "EmptyWeakArrayList", - ("read_only_space", 0x021ed): "EmptyDescriptorArray", - ("read_only_space", 0x02225): "EmptyEnumCache", - ("read_only_space", 0x02259): "EmptyFixedArray", - ("read_only_space", 0x02261): "NullValue", - ("read_only_space", 0x02369): "UninitializedValue", - ("read_only_space", 0x023e1): "UndefinedValue", - ("read_only_space", 0x02425): "NanValue", - ("read_only_space", 0x02459): "TheHoleValue", - ("read_only_space", 0x02485): "HoleNanValue", - ("read_only_space", 0x024b9): "TrueValue", - ("read_only_space", 0x024f9): "FalseValue", - ("read_only_space", 0x02529): "empty_string", - ("read_only_space", 0x02765): "EmptyScopeInfo", - ("read_only_space", 0x0279d): "ArgumentsMarker", - ("read_only_space", 0x027fd): "Exception", - ("read_only_space", 0x02859): "TerminationException", - ("read_only_space", 0x028c1): "OptimizedOut", - ("read_only_space", 0x02921): "StaleRegister", - ("read_only_space", 0x034c1): "EmptyPropertyArray", - ("read_only_space", 0x034c9): "EmptyByteArray", - ("read_only_space", 0x034d1): "EmptyObjectBoilerplateDescription", - ("read_only_space", 0x03505): "EmptyArrayBoilerplateDescription", - ("read_only_space", 0x03511): "EmptyClosureFeedbackCellArray", - ("read_only_space", 0x03519): "EmptySlowElementDictionary", - ("read_only_space", 0x0353d): "EmptyOrderedHashMap", - ("read_only_space", 0x03551): "EmptyOrderedHashSet", - ("read_only_space", 0x03565): "EmptyFeedbackMetadata", - ("read_only_space", 0x03571): "EmptyPropertyDictionary", - ("read_only_space", 0x03599): "EmptyOrderedPropertyDictionary", - ("read_only_space", 0x035b1): "EmptySwissPropertyDictionary", - ("read_only_space", 0x03605): "NoOpInterceptorInfo", - ("read_only_space", 0x0362d): "EmptyArrayList", - ("read_only_space", 0x03639): "EmptyWeakFixedArray", - ("read_only_space", 0x03641): "InvalidPrototypeValidityCell", - ("read_only_space", 0x03649): "InfinityValue", - ("read_only_space", 0x03655): 
"MinusZeroValue", - ("read_only_space", 0x03661): "MinusInfinityValue", - ("read_only_space", 0x0366d): "MaxSafeInteger", - ("read_only_space", 0x03679): "MaxUInt32", - ("read_only_space", 0x03685): "SmiMinValue", - ("read_only_space", 0x03691): "SmiMaxValuePlusOne", - ("read_only_space", 0x0369d): "SingleCharacterStringTable", - ("read_only_space", 0x04aa5): "SelfReferenceMarker", - ("read_only_space", 0x04ae5): "BasicBlockCountersMarker", - ("read_only_space", 0x04b29): "OffHeapTrampolineRelocationInfo", - ("read_only_space", 0x04b35): "GlobalThisBindingScopeInfo", - ("read_only_space", 0x04b65): "EmptyFunctionScopeInfo", - ("read_only_space", 0x04b89): "NativeScopeInfo", - ("read_only_space", 0x04ba1): "ShadowRealmScopeInfo", - ("read_only_space", 0x04bb9): "EmptySymbolTable", - ("read_only_space", 0x04bd5): "HashSeed", + ("read_only_space", 0x022a9): "EmptyFixedArray", + ("read_only_space", 0x022b1): "EmptyWeakFixedArray", + ("read_only_space", 0x022b9): "EmptyWeakArrayList", + ("read_only_space", 0x022c5): "NullValue", + ("read_only_space", 0x022e1): "UndefinedValue", + ("read_only_space", 0x022fd): "TheHoleValue", + ("read_only_space", 0x02a99): "EmptyEnumCache", + ("read_only_space", 0x02aa5): "EmptyDescriptorArray", + ("read_only_space", 0x03875): "InvalidPrototypeValidityCell", + ("read_only_space", 0x03f0d): "EmptyArrayList", + ("read_only_space", 0x03f19): "EmptyScopeInfo", + ("read_only_space", 0x03f29): "EmptyObjectBoilerplateDescription", + ("read_only_space", 0x03f35): "EmptyArrayBoilerplateDescription", + ("read_only_space", 0x03f41): "TrueValue", + ("read_only_space", 0x03f5d): "FalseValue", + ("read_only_space", 0x03f79): "EmptyByteArray", + ("read_only_space", 0x03f81): "EmptyPropertyArray", + ("read_only_space", 0x03f89): "EmptyClosureFeedbackCellArray", + ("read_only_space", 0x03f91): "NoOpInterceptorInfo", + ("read_only_space", 0x03fb9): "MinusZeroValue", + ("read_only_space", 0x03fc5): "NanValue", + ("read_only_space", 0x03fd1): 
"HoleNanValue", + ("read_only_space", 0x03fdd): "InfinityValue", + ("read_only_space", 0x03fe9): "MinusInfinityValue", + ("read_only_space", 0x03ff5): "MaxSafeInteger", + ("read_only_space", 0x04001): "MaxUInt32", + ("read_only_space", 0x0400d): "SmiMinValue", + ("read_only_space", 0x04019): "SmiMaxValuePlusOne", + ("read_only_space", 0x04025): "HashSeed", + ("read_only_space", 0x04035): "SingleCharacterStringTable", + ("read_only_space", 0x0543d): "empty_string", + ("read_only_space", 0x07b19): "UninitializedValue", + ("read_only_space", 0x07b51): "ArgumentsMarker", + ("read_only_space", 0x07b89): "TerminationException", + ("read_only_space", 0x07bc9): "Exception", + ("read_only_space", 0x07be5): "OptimizedOut", + ("read_only_space", 0x07c1d): "StaleRegister", + ("read_only_space", 0x07c55): "SelfReferenceMarker", + ("read_only_space", 0x07c95): "BasicBlockCountersMarker", + ("read_only_space", 0x081c9): "EmptyPropertyDictionary", + ("read_only_space", 0x081f1): "EmptySymbolTable", + ("read_only_space", 0x0820d): "EmptySlowElementDictionary", + ("read_only_space", 0x08231): "EmptyOrderedHashMap", + ("read_only_space", 0x08245): "EmptyOrderedHashSet", + ("read_only_space", 0x08259): "EmptyOrderedPropertyDictionary", + ("read_only_space", 0x0827d): "EmptySwissPropertyDictionary", + ("read_only_space", 0x0829d): "EmptyFeedbackMetadata", + ("read_only_space", 0x082a9): "GlobalThisBindingScopeInfo", + ("read_only_space", 0x082c9): "EmptyFunctionScopeInfo", + ("read_only_space", 0x082ed): "NativeScopeInfo", + ("read_only_space", 0x08305): "ShadowRealmScopeInfo", + ("read_only_space", 0x0831d): "OffHeapTrampolineRelocationInfo", ("old_space", 0x0423d): "ArgumentsIteratorAccessor", ("old_space", 0x04255): "ArrayLengthAccessor", ("old_space", 0x0426d): "BoundFunctionLengthAccessor", From bd1562caa23a18310dea69f4eb51b686e0a9c235 Mon Sep 17 00:00:00 2001 From: Manos Koukoutos Date: Thu, 22 Dec 2022 13:35:20 +0100 Subject: [PATCH 041/654] [wasm] Pass module to wrapper 
compilation Pass the {WasmModule} in the {CompilationEnv} argument in {CompileWasmImportCallWrapper} to {BuildWasmToJSWrapper}, which in turn passes it to {FromJS}. Bug: chromium:1401934 Change-Id: I8d689d881e4d8013cd4dcb6cfb54d663d1c4ec38 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4120443 Commit-Queue: Clemens Backes Reviewed-by: Clemens Backes Auto-Submit: Manos Koukoutos Cr-Commit-Position: refs/heads/main@{#84992} --- src/compiler/wasm-compiler.cc | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc index 8e8e45247a..671189194b 100644 --- a/src/compiler/wasm-compiler.cc +++ b/src/compiler/wasm-compiler.cc @@ -7169,7 +7169,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { // For wasm-to-js wrappers, parameter 0 is a WasmApiFunctionRef. bool BuildWasmToJSWrapper(WasmImportCallKind kind, int expected_arity, - wasm::Suspend suspend) { + wasm::Suspend suspend, + const wasm::WasmModule* module) { int wasm_count = static_cast(sig_->parameter_count()); // Build the start and the parameter nodes. @@ -7322,10 +7323,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { // Convert the return value(s) back. if (sig_->return_count() <= 1) { - Node* val = - sig_->return_count() == 0 - ? Int32Constant(0) - : FromJS(call, native_context, sig_->GetReturn(), nullptr); + Node* val = sig_->return_count() == 0 + ? 
Int32Constant(0) + : FromJS(call, native_context, sig_->GetReturn(), module); BuildModifyThreadInWasmFlag(true); Return(val); } else { @@ -7334,7 +7334,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { base::SmallVector wasm_values(sig_->return_count()); for (unsigned i = 0; i < sig_->return_count(); ++i) { wasm_values[i] = FromJS(gasm_->LoadFixedArrayElementAny(fixed_array, i), - native_context, sig_->GetReturn(i), nullptr); + native_context, sig_->GetReturn(i), module); } BuildModifyThreadInWasmFlag(true); Return(base::VectorOf(wasm_values)); @@ -8238,7 +8238,7 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper( &zone, mcgraph, sig, env->module, WasmGraphBuilder::kWasmApiFunctionRefMode, nullptr, source_position_table, StubCallMode::kCallWasmRuntimeStub, env->enabled_features); - builder.BuildWasmToJSWrapper(kind, expected_arity, suspend); + builder.BuildWasmToJSWrapper(kind, expected_arity, suspend, env->module); // Build a name in the form "wasm-to-js--". constexpr size_t kMaxNameLen = 128; @@ -8391,7 +8391,7 @@ MaybeHandle CompileWasmToJSWrapper(Isolate* isolate, nullptr, nullptr, StubCallMode::kCallBuiltinPointer, wasm::WasmFeatures::FromIsolate(isolate)); - builder.BuildWasmToJSWrapper(kind, expected_arity, suspend); + builder.BuildWasmToJSWrapper(kind, expected_arity, suspend, nullptr); // Build a name in the form "wasm-to-js--". constexpr size_t kMaxNameLen = 128; From e17eee4894be67f715a7b2d7f17d8b69724f1cf8 Mon Sep 17 00:00:00 2001 From: Clemens Backes Date: Thu, 22 Dec 2022 09:43:42 +0100 Subject: [PATCH 042/654] [wasm] Fix printing of wasm-to-js frames After https://crrev.com/c/3859787 those frames would be printed like standard Wasm frames, but in the place of the WasmInstanceObject, they have a WasmApiFunctionRef object instead. So special-case the {WasmToJsFrame::instance()} to load the instance properly. Also special-case the {position()} accessor for imported functions. 
R=victorgomes@chromium.org Bug: chromium:1402270 Change-Id: I39805805a50e7a73d7d8075c63c46bdf5a373a33 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4116778 Reviewed-by: Nico Hartmann Commit-Queue: Clemens Backes Reviewed-by: Victor Gomes Cr-Commit-Position: refs/heads/main@{#84993} --- src/compiler/backend/arm/code-generator-arm.cc | 4 ++++ .../backend/arm64/code-generator-arm64.cc | 3 +++ src/compiler/backend/ia32/code-generator-ia32.cc | 4 ++++ src/compiler/backend/x64/code-generator-x64.cc | 8 ++++---- src/diagnostics/objects-printer.cc | 1 + src/execution/frames.cc | 11 ++++++++++- src/execution/frames.h | 5 ++++- test/mjsunit/regress/asm/regress-1402270.js | 16 ++++++++++++++++ 8 files changed, 46 insertions(+), 6 deletions(-) create mode 100644 test/mjsunit/regress/asm/regress-1402270.js diff --git a/src/compiler/backend/arm/code-generator-arm.cc b/src/compiler/backend/arm/code-generator-arm.cc index 51dbd89955..5ab345adf2 100644 --- a/src/compiler/backend/arm/code-generator-arm.cc +++ b/src/compiler/backend/arm/code-generator-arm.cc @@ -3699,6 +3699,10 @@ void CodeGenerator::AssembleConstructFrame() { if (call_descriptor->IsWasmFunctionCall() || call_descriptor->IsWasmImportWrapper() || call_descriptor->IsWasmCapiFunction()) { + // For import wrappers and C-API functions, this stack slot is only used + // for printing stack traces in V8. Also, it holds a WasmApiFunctionRef + // instead of the instance itself, which is taken care of in the frames + // accessors. 
__ Push(kWasmInstanceRegister); } if (call_descriptor->IsWasmCapiFunction()) { diff --git a/src/compiler/backend/arm64/code-generator-arm64.cc b/src/compiler/backend/arm64/code-generator-arm64.cc index 1889a7b680..c4488452f3 100644 --- a/src/compiler/backend/arm64/code-generator-arm64.cc +++ b/src/compiler/backend/arm64/code-generator-arm64.cc @@ -3224,6 +3224,9 @@ void CodeGenerator::AssembleConstructFrame() { Register scratch = temps.AcquireX(); __ Mov(scratch, StackFrame::TypeToMarker(info()->GetOutputStackFrameType())); + // This stack slot is only used for printing stack traces in V8. Also, + // it holds a WasmApiFunctionRef instead of the instance itself, which + // is taken care of in the frames accessors. __ Push(scratch, kWasmInstanceRegister); int extra_slots = call_descriptor->kind() == CallDescriptor::kCallWasmImportWrapper diff --git a/src/compiler/backend/ia32/code-generator-ia32.cc b/src/compiler/backend/ia32/code-generator-ia32.cc index b53adf286f..865b890c92 100644 --- a/src/compiler/backend/ia32/code-generator-ia32.cc +++ b/src/compiler/backend/ia32/code-generator-ia32.cc @@ -4043,6 +4043,10 @@ void CodeGenerator::AssembleConstructFrame() { if (call_descriptor->IsWasmFunctionCall() || call_descriptor->IsWasmImportWrapper() || call_descriptor->IsWasmCapiFunction()) { + // For import wrappers and C-API functions, this stack slot is only used + // for printing stack traces in V8. Also, it holds a WasmApiFunctionRef + // instead of the instance itself, which is taken care of in the frames + // accessors. 
__ push(kWasmInstanceRegister); } if (call_descriptor->IsWasmCapiFunction()) { diff --git a/src/compiler/backend/x64/code-generator-x64.cc b/src/compiler/backend/x64/code-generator-x64.cc index 4bf367bd5b..9e80816204 100644 --- a/src/compiler/backend/x64/code-generator-x64.cc +++ b/src/compiler/backend/x64/code-generator-x64.cc @@ -4859,10 +4859,10 @@ void CodeGenerator::AssembleConstructFrame() { if (call_descriptor->IsWasmFunctionCall() || call_descriptor->IsWasmImportWrapper() || call_descriptor->IsWasmCapiFunction()) { - // We do not use this stack value in import wrappers and capi functions. - // We push it anyway to satisfy legacy assumptions about these frames' - // size and order. - // TODO(manoskouk): Consider fixing this. + // For import wrappers and C-API functions, this stack slot is only used + // for printing stack traces in V8. Also, it holds a WasmApiFunctionRef + // instead of the instance itself, which is taken care of in the frames + // accessors. __ pushq(kWasmInstanceRegister); } if (call_descriptor->IsWasmCapiFunction()) { diff --git a/src/diagnostics/objects-printer.cc b/src/diagnostics/objects-printer.cc index 4106b524f1..b763468f58 100644 --- a/src/diagnostics/objects-printer.cc +++ b/src/diagnostics/objects-printer.cc @@ -2156,6 +2156,7 @@ void WasmApiFunctionRef::WasmApiFunctionRefPrint(std::ostream& os) { os << "\n - isolate_root: " << reinterpret_cast(isolate_root()); os << "\n - native_context: " << Brief(native_context()); os << "\n - callable: " << Brief(callable()); + os << "\n - instance: " << Brief(instance()); os << "\n - suspend: " << suspend(); os << "\n"; } diff --git a/src/execution/frames.cc b/src/execution/frames.cc index 7af7ae9f13..1730323e71 100644 --- a/src/execution/frames.cc +++ b/src/execution/frames.cc @@ -2501,7 +2501,7 @@ void WasmFrame::Print(StringStream* accumulator, PrintMode mode, return; } wasm::WasmCodeRefScope code_ref_scope; - accumulator->Add("Wasm ["); + accumulator->Add(is_wasm_to_js() ? 
"Wasm-to-JS [" : "Wasm ["); accumulator->PrintName(script().name()); Address instruction_start = wasm_code()->instruction_start(); base::Vector raw_func_name = @@ -2632,6 +2632,15 @@ void WasmDebugBreakFrame::Print(StringStream* accumulator, PrintMode mode, if (mode != OVERVIEW) accumulator->Add("\n"); } +WasmInstanceObject WasmToJsFrame::wasm_instance() const { + // WasmToJsFrames hold the {WasmApiFunctionRef} object in the instance slot. + // Load the instance from there. + const int offset = WasmFrameConstants::kWasmInstanceOffset; + Object func_ref_obj(Memory
(fp() + offset)); + WasmApiFunctionRef func_ref = WasmApiFunctionRef::cast(func_ref_obj); + return WasmInstanceObject::cast(func_ref.instance()); +} + void JsToWasmFrame::Iterate(RootVisitor* v) const { CodeLookupResult lookup_result = GetContainingCode(isolate(), pc()); CHECK(lookup_result.IsFound()); diff --git a/src/execution/frames.h b/src/execution/frames.h index 536b044100..4bed950bb5 100644 --- a/src/execution/frames.h +++ b/src/execution/frames.h @@ -1035,7 +1035,7 @@ class WasmFrame : public TypedFrame { void Iterate(RootVisitor* v) const override; // Accessors. - V8_EXPORT_PRIVATE WasmInstanceObject wasm_instance() const; + virtual V8_EXPORT_PRIVATE WasmInstanceObject wasm_instance() const; V8_EXPORT_PRIVATE wasm::NativeModule* native_module() const; wasm::WasmCode* wasm_code() const; int function_index() const; @@ -1101,6 +1101,9 @@ class WasmToJsFrame : public WasmFrame { public: Type type() const override { return WASM_TO_JS; } + int position() const override { return 0; } + WasmInstanceObject wasm_instance() const override; + protected: inline explicit WasmToJsFrame(StackFrameIteratorBase* iterator); diff --git a/test/mjsunit/regress/asm/regress-1402270.js b/test/mjsunit/regress/asm/regress-1402270.js new file mode 100644 index 0000000000..77badd768f --- /dev/null +++ b/test/mjsunit/regress/asm/regress-1402270.js @@ -0,0 +1,16 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +function print_stack(unused_arg) { + console.trace(); +} +function asm(_, imports) { + 'use asm'; + var print_stack = imports.print_stack; + function f() { + print_stack(1); + } + return f; +} +asm({}, {'print_stack': print_stack})(); From 7396b09771aa8d003b09ee9e8ac33772f0f4c2c9 Mon Sep 17 00:00:00 2001 From: Qifan Pan Date: Fri, 16 Dec 2022 15:34:08 +0100 Subject: [PATCH 043/654] [turbofan] Collect BigInt64 feedback for tagged equality Collect BigInt64 feedback when lhs and rhs reference the same value. Bug: v8:9407 Change-Id: I1045d839da28b432fc343cbf9c98915509b7ed71 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4111602 Reviewed-by: Nico Hartmann Commit-Queue: Qifan Pan Cr-Commit-Position: refs/heads/main@{#84994} --- src/codegen/code-stub-assembler.cc | 8 ++++++++ test/mjsunit/compiler/bigint-equal.js | 3 +++ 2 files changed, 11 insertions(+) diff --git a/src/codegen/code-stub-assembler.cc b/src/codegen/code-stub-assembler.cc index a8a69133a0..89ed442d74 100644 --- a/src/codegen/code-stub-assembler.cc +++ b/src/codegen/code-stub-assembler.cc @@ -13016,6 +13016,14 @@ void CodeStubAssembler::GenerateEqual_Same(TNode value, Label* if_equal, BIND(&if_bigint); { CSA_DCHECK(this, IsBigInt(value_heapobject)); + + if (Is64()) { + Label if_large_bigint(this); + GotoIfLargeBigInt(CAST(value_heapobject), &if_large_bigint); + CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt64); + Goto(if_equal); + BIND(&if_large_bigint); + } CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt); Goto(if_equal); } diff --git a/test/mjsunit/compiler/bigint-equal.js b/test/mjsunit/compiler/bigint-equal.js index 8133147295..dbbd745afa 100644 --- a/test/mjsunit/compiler/bigint-equal.js +++ b/test/mjsunit/compiler/bigint-equal.js @@ -4,6 +4,8 @@ // Flags: --allow-natives-syntax --turbofan --no-always-turbofan +const bi = 42n; + function Equal(x, y) { return x == y; } @@ -16,6 +18,7 @@ function Test(f, large) { assertEquals(false, f(1n, 
2n)); assertEquals(false, f(1n, -1n)); assertEquals(true, f(-1n, -1n)); + assertEquals(true, f(bi, bi)); assertEquals(false, f(2n ** 63n - 1n, -(2n ** 63n) + 1n)); if (large) { assertEquals(false, f(2n ** 63n, -(2n ** 63n))); From 6c90e3a9b3e0df4bf6aaa114c3ca258c30036c19 Mon Sep 17 00:00:00 2001 From: Qifan Pan Date: Mon, 19 Dec 2022 17:49:23 +0100 Subject: [PATCH 044/654] [turbofan] Support the rest of the BigInt comparisons This CL introduced four more operators in TurboFan: BigIntLessThan, BigIntLessThanOrEqual, and the corresponding speculative operators so that all the BigInt comparisons are supported. This CL also implemented fast paths for small BigInt inputs. Bug: v8:9407 Change-Id: Iaa2d4dbebf68656d775a7feb65b97fb5c598ec23 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4111951 Commit-Queue: Qifan Pan Reviewed-by: Nico Hartmann Cr-Commit-Position: refs/heads/main@{#84995} --- src/builtins/builtins-bigint.tq | 57 ++++++++++++++ src/codegen/code-stub-assembler.cc | 96 +++++++++++++++++------ src/codegen/code-stub-assembler.h | 4 + src/compiler/effect-control-linearizer.cc | 40 ++++++++++ src/compiler/js-type-hint-lowering.cc | 16 +++- src/compiler/opcodes.h | 6 +- src/compiler/representation-change.cc | 8 ++ src/compiler/simplified-lowering.cc | 4 +- src/compiler/simplified-operator.cc | 19 +++-- src/compiler/simplified-operator.h | 4 + src/compiler/typer.cc | 22 +++--- src/compiler/verifier.cc | 4 + test/mjsunit/compiler/bigint-compare.js | 61 ++++++++++++++ 13 files changed, 291 insertions(+), 50 deletions(-) create mode 100644 test/mjsunit/compiler/bigint-compare.js diff --git a/src/builtins/builtins-bigint.tq b/src/builtins/builtins-bigint.tq index ade83dc965..c8748c70bb 100644 --- a/src/builtins/builtins-bigint.tq +++ b/src/builtins/builtins-bigint.tq @@ -8,6 +8,8 @@ namespace bigint { const kPositiveSign: uint32 = 0; const kNegativeSign: uint32 = 1; +const kGreaterThan: intptr = 1; +const kLessThan: intptr = -1; const 
kMustRoundDownBitShift: uint32 = 30; @@ -798,6 +800,61 @@ builtin BigIntEqual(implicit context: Context)(x: BigInt, y: BigInt): Boolean { return True; } +// Returns r such that r < 0 if |x| < |y|; r > 0 if |x| > |y|; +// r == 0 if |x| == |y|. +macro BigIntCompareAbsolute(implicit context: Context)( + x: BigInt, y: BigInt): intptr { + const xlength = ReadBigIntLength(x); + const ylength = ReadBigIntLength(y); + const diff = xlength - ylength; + if (diff != 0) { + return diff; + } + + // case: {xlength} == {ylength} + for (let i: intptr = xlength - 1; i >= 0; --i) { + const xdigit = LoadBigIntDigit(x, i); + const ydigit = LoadBigIntDigit(y, i); + if (xdigit != ydigit) { + return (xdigit > ydigit) ? kGreaterThan : kLessThan; + } + } + return 0; +} + +// Returns r such that r < 0 if x < y; r > 0 if x > y; r == 0 if x == y. +macro BigIntCompare(implicit context: Context)(x: BigInt, y: BigInt): intptr { + const xsign = ReadBigIntSign(x); + const ysign = ReadBigIntSign(y); + if (xsign != ysign) { + return xsign == kPositiveSign ? kGreaterThan : kLessThan; + } + + // case: {xsign} == {ysign} + const diff = BigIntCompareAbsolute(x, y); + return xsign == kPositiveSign ? diff : 0 - diff; +} + +builtin BigIntLessThan(implicit context: Context)( + x: BigInt, y: BigInt): Boolean { + return BigIntCompare(x, y) < 0 ? True : False; +} + +builtin BigIntGreaterThan(implicit context: Context)( + x: BigInt, y: BigInt): Boolean { + return BigIntCompare(x, y) > 0 ? True : False; +} + +builtin BigIntLessThanOrEqual(implicit context: Context)( + x: BigInt, y: BigInt): Boolean { + return BigIntCompare(x, y) <= 0 ? True : False; +} + +builtin BigIntGreaterThanOrEqual(implicit context: Context)( + x: BigInt, y: BigInt): Boolean { + return BigIntCompare(x, y) >= 0 ? 
True : False; +} + builtin BigIntUnaryMinus(implicit context: Context)(bigint: BigInt): BigInt { const length = ReadBigIntLength(bigint); diff --git a/src/codegen/code-stub-assembler.cc b/src/codegen/code-stub-assembler.cc index 89ed442d74..3609a19b19 100644 --- a/src/codegen/code-stub-assembler.cc +++ b/src/codegen/code-stub-assembler.cc @@ -36,6 +36,25 @@ namespace v8 { namespace internal { +namespace { + +Builtin BigIntComparisonBuiltinOf(Operation const& op) { + switch (op) { + case Operation::kLessThan: + return Builtin::kBigIntLessThan; + case Operation::kGreaterThan: + return Builtin::kBigIntGreaterThan; + case Operation::kLessThanOrEqual: + return Builtin::kBigIntLessThanOrEqual; + case Operation::kGreaterThanOrEqual: + return Builtin::kBigIntGreaterThanOrEqual; + default: + UNREACHABLE(); + } +} + +} // namespace + CodeStubAssembler::CodeStubAssembler(compiler::CodeAssemblerState* state) : compiler::CodeAssembler(state), TorqueGeneratedExportedMacrosAssembler(state) { @@ -12531,6 +12550,41 @@ TNode CodeStubAssembler::GotoIfHasContextExtensionUpToDepth( return cur_context.value(); } +void CodeStubAssembler::BigInt64Comparison(Operation op, TNode& left, + TNode& right, + Label* return_true, + Label* return_false) { + TVARIABLE(UintPtrT, left_raw); + TVARIABLE(UintPtrT, right_raw); + BigIntToRawBytes(CAST(left), &left_raw, &left_raw); + BigIntToRawBytes(CAST(right), &right_raw, &right_raw); + TNode left_raw_value = left_raw.value(); + TNode right_raw_value = right_raw.value(); + + TNode condition; + switch (op) { + case Operation::kEqual: + case Operation::kStrictEqual: + condition = WordEqual(left_raw_value, right_raw_value); + break; + case Operation::kLessThan: + condition = IntPtrLessThan(left_raw_value, right_raw_value); + break; + case Operation::kLessThanOrEqual: + condition = IntPtrLessThanOrEqual(left_raw_value, right_raw_value); + break; + case Operation::kGreaterThan: + condition = IntPtrGreaterThan(left_raw_value, right_raw_value); + break; + case 
Operation::kGreaterThanOrEqual: + condition = IntPtrGreaterThanOrEqual(left_raw_value, right_raw_value); + break; + default: + UNREACHABLE(); + } + Branch(condition, return_true, return_false); +} + TNode CodeStubAssembler::RelationalComparison( Operation op, TNode left, TNode right, const LazyNode& context, TVariable* var_type_feedback) { @@ -12753,11 +12807,21 @@ TNode CodeStubAssembler::RelationalComparison( BIND(&if_right_bigint); { + if (Is64()) { + Label if_both_bigint(this); + GotoIfLargeBigInt(CAST(left), &if_both_bigint); + GotoIfLargeBigInt(CAST(right), &if_both_bigint); + + CombineFeedback(var_type_feedback, + CompareOperationFeedback::kBigInt64); + BigInt64Comparison(op, left, right, &return_true, &return_false); + BIND(&if_both_bigint); + } + CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt); - var_result = CAST(CallRuntime(Runtime::kBigIntCompareToBigInt, - NoContextConstant(), SmiConstant(op), - left, right)); + var_result = CAST(CallBuiltin(BigIntComparisonBuiltinOf(op), + NoContextConstant(), left, right)); Goto(&end); } @@ -13288,22 +13352,13 @@ TNode CodeStubAssembler::Equal(TNode left, TNode right, { if (Is64()) { Label if_both_bigint(this); - GotoIfLargeBigInt(CAST(left), &if_both_bigint); GotoIfLargeBigInt(CAST(right), &if_both_bigint); OverwriteFeedback(var_type_feedback, CompareOperationFeedback::kBigInt64); - - TVARIABLE(UintPtrT, left_raw); - TVARIABLE(UintPtrT, right_raw); - BigIntToRawBytes(CAST(left), &left_raw, &left_raw); - BigIntToRawBytes(CAST(right), &right_raw, &right_raw); - - Branch(WordEqual(UncheckedCast(left_raw.value()), - UncheckedCast(right_raw.value())), - &if_equal, &if_notequal); - + BigInt64Comparison(Operation::kEqual, left, right, &if_equal, + &if_notequal); BIND(&if_both_bigint); } @@ -13731,22 +13786,13 @@ TNode CodeStubAssembler::StrictEqual( { if (Is64()) { Label if_both_bigint(this); - GotoIfLargeBigInt(CAST(lhs), &if_both_bigint); GotoIfLargeBigInt(CAST(rhs), &if_both_bigint); 
OverwriteFeedback(var_type_feedback, CompareOperationFeedback::kBigInt64); - - TVARIABLE(UintPtrT, lhs_raw); - TVARIABLE(UintPtrT, rhs_raw); - BigIntToRawBytes(CAST(lhs), &lhs_raw, &lhs_raw); - BigIntToRawBytes(CAST(rhs), &rhs_raw, &rhs_raw); - - Branch(WordEqual(UncheckedCast(lhs_raw.value()), - UncheckedCast(rhs_raw.value())), - &if_equal, &if_notequal); - + BigInt64Comparison(Operation::kStrictEqual, lhs, rhs, + &if_equal, &if_notequal); BIND(&if_both_bigint); } diff --git a/src/codegen/code-stub-assembler.h b/src/codegen/code-stub-assembler.h index e67e7ad5ef..e59fdef9fc 100644 --- a/src/codegen/code-stub-assembler.h +++ b/src/codegen/code-stub-assembler.h @@ -4256,6 +4256,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler private: friend class CodeStubArguments; + void BigInt64Comparison(Operation op, TNode& left, + TNode& right, Label* return_true, + Label* return_false); + void HandleBreakOnNode(); TNode AllocateRawDoubleAligned(TNode size_in_bytes, diff --git a/src/compiler/effect-control-linearizer.cc b/src/compiler/effect-control-linearizer.cc index 021c69419a..edc968302f 100644 --- a/src/compiler/effect-control-linearizer.cc +++ b/src/compiler/effect-control-linearizer.cc @@ -191,6 +191,8 @@ class EffectControlLinearizer { Node* LowerBigIntShiftLeft(Node* node, Node* frame_state); Node* LowerBigIntShiftRight(Node* node, Node* frame_state); Node* LowerBigIntEqual(Node* node); + Node* LowerBigIntLessThan(Node* node); + Node* LowerBigIntLessThanOrEqual(Node* node); Node* LowerBigIntNegate(Node* node); Node* LowerCheckFloat64Hole(Node* node, Node* frame_state); Node* LowerCheckNotTaggedHole(Node* node, Node* frame_state); @@ -1306,6 +1308,12 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, case IrOpcode::kBigIntEqual: result = LowerBigIntEqual(node); break; + case IrOpcode::kBigIntLessThan: + result = LowerBigIntLessThan(node); + break; + case IrOpcode::kBigIntLessThanOrEqual: + result = LowerBigIntLessThanOrEqual(node); + break; case 
IrOpcode::kBigIntNegate: result = LowerBigIntNegate(node); break; @@ -4822,6 +4830,38 @@ Node* EffectControlLinearizer::LowerBigIntEqual(Node* node) { return value; } +Node* EffectControlLinearizer::LowerBigIntLessThan(Node* node) { + Node* lhs = node->InputAt(0); + Node* rhs = node->InputAt(1); + + Callable const callable = + Builtins::CallableFor(isolate(), Builtin::kBigIntLessThan); + auto call_descriptor = Linkage::GetStubCallDescriptor( + graph()->zone(), callable.descriptor(), + callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags, + Operator::kFoldable | Operator::kNoThrow); + Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, + rhs, __ NoContextConstant()); + + return value; +} + +Node* EffectControlLinearizer::LowerBigIntLessThanOrEqual(Node* node) { + Node* lhs = node->InputAt(0); + Node* rhs = node->InputAt(1); + + Callable const callable = + Builtins::CallableFor(isolate(), Builtin::kBigIntLessThanOrEqual); + auto call_descriptor = Linkage::GetStubCallDescriptor( + graph()->zone(), callable.descriptor(), + callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags, + Operator::kFoldable | Operator::kNoThrow); + Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, + rhs, __ NoContextConstant()); + + return value; +} + Node* EffectControlLinearizer::LowerBigIntNegate(Node* node) { Callable const callable = Builtins::CallableFor(isolate(), Builtin::kBigIntUnaryMinus); diff --git a/src/compiler/js-type-hint-lowering.cc b/src/compiler/js-type-hint-lowering.cc index 941bc682fe..904dee0dc5 100644 --- a/src/compiler/js-type-hint-lowering.cc +++ b/src/compiler/js-type-hint-lowering.cc @@ -231,6 +231,16 @@ class JSSpeculativeBinopBuilder final { switch (op_->opcode()) { case IrOpcode::kJSEqual: return simplified()->SpeculativeBigIntEqual(hint); + case IrOpcode::kJSLessThan: + return simplified()->SpeculativeBigIntLessThan(hint); + case IrOpcode::kJSGreaterThan: + 
std::swap(left_, right_); + return simplified()->SpeculativeBigIntLessThan(hint); + case IrOpcode::kJSLessThanOrEqual: + return simplified()->SpeculativeBigIntLessThanOrEqual(hint); + case IrOpcode::kJSGreaterThanOrEqual: + std::swap(left_, right_); + return simplified()->SpeculativeBigIntLessThanOrEqual(hint); default: break; } @@ -433,10 +443,8 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation( if (Node* node = b.TryBuildNumberCompare()) { return LoweringResult::SideEffectFree(node, node, control); } - if (op->opcode() == IrOpcode::kJSEqual) { - if (Node* node = b.TryBuildBigIntCompare()) { - return LoweringResult::SideEffectFree(node, node, control); - } + if (Node* node = b.TryBuildBigIntCompare()) { + return LoweringResult::SideEffectFree(node, node, control); } break; } diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h index 8cc6fcce05..a022d3b6b2 100644 --- a/src/compiler/opcodes.h +++ b/src/compiler/opcodes.h @@ -331,7 +331,11 @@ V(StringLessThan) \ V(StringLessThanOrEqual) \ V(BigIntEqual) \ - V(SpeculativeBigIntEqual) + V(BigIntLessThan) \ + V(BigIntLessThanOrEqual) \ + V(SpeculativeBigIntEqual) \ + V(SpeculativeBigIntLessThan) \ + V(SpeculativeBigIntLessThanOrEqual) #define SIMPLIFIED_NUMBER_BINOP_LIST(V) \ V(NumberAdd) \ diff --git a/src/compiler/representation-change.cc b/src/compiler/representation-change.cc index 77fea58d3b..5182369fcf 100644 --- a/src/compiler/representation-change.cc +++ b/src/compiler/representation-change.cc @@ -1387,6 +1387,10 @@ const Operator* RepresentationChanger::Int64OperatorFor( return machine()->Word64Xor(); case IrOpcode::kSpeculativeBigIntEqual: return machine()->Word64Equal(); + case IrOpcode::kSpeculativeBigIntLessThan: + return machine()->Int64LessThan(); + case IrOpcode::kSpeculativeBigIntLessThanOrEqual: + return machine()->Int64LessThanOrEqual(); default: UNREACHABLE(); } @@ -1435,6 +1439,10 @@ const Operator* RepresentationChanger::BigIntOperatorFor( return 
simplified()->BigIntShiftRight(); case IrOpcode::kSpeculativeBigIntEqual: return simplified()->BigIntEqual(); + case IrOpcode::kSpeculativeBigIntLessThan: + return simplified()->BigIntLessThan(); + case IrOpcode::kSpeculativeBigIntLessThanOrEqual: + return simplified()->BigIntLessThanOrEqual(); default: UNREACHABLE(); } diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc index 7227ba8a73..020cf41469 100644 --- a/src/compiler/simplified-lowering.cc +++ b/src/compiler/simplified-lowering.cc @@ -3472,7 +3472,9 @@ class RepresentationSelector { } } } - case IrOpcode::kSpeculativeBigIntEqual: { + case IrOpcode::kSpeculativeBigIntEqual: + case IrOpcode::kSpeculativeBigIntLessThan: + case IrOpcode::kSpeculativeBigIntLessThanOrEqual: { // Loose equality can throw a TypeError when failing to cast an object // operand to primitive. if (truncation.IsUnused() && BothInputsAre(node, Type::BigInt())) { diff --git a/src/compiler/simplified-operator.cc b/src/compiler/simplified-operator.cc index 22374bbd81..1897fa23df 100644 --- a/src/compiler/simplified-operator.cc +++ b/src/compiler/simplified-operator.cc @@ -554,7 +554,9 @@ BigIntOperationHint BigIntOperationHintOf(const Operator* op) { op->opcode() == IrOpcode::kSpeculativeBigIntBitwiseXor || op->opcode() == IrOpcode::kSpeculativeBigIntShiftLeft || op->opcode() == IrOpcode::kSpeculativeBigIntShiftRight || - op->opcode() == IrOpcode::kSpeculativeBigIntEqual); + op->opcode() == IrOpcode::kSpeculativeBigIntEqual || + op->opcode() == IrOpcode::kSpeculativeBigIntLessThan || + op->opcode() == IrOpcode::kSpeculativeBigIntLessThanOrEqual); return OpParameter(op); } @@ -761,6 +763,8 @@ bool operator==(CheckMinusZeroParameters const& lhs, V(Integral32OrMinusZeroToBigInt, Operator::kNoProperties, 1, 0) \ V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \ V(BigIntEqual, Operator::kNoProperties, 2, 0) \ + V(BigIntLessThan, Operator::kNoProperties, 2, 0) \ + V(BigIntLessThanOrEqual, 
Operator::kNoProperties, 2, 0) \ V(BigIntNegate, Operator::kNoProperties, 1, 0) \ V(StringConcat, Operator::kNoProperties, 3, 0) \ V(StringToNumber, Operator::kNoProperties, 1, 0) \ @@ -1680,6 +1684,7 @@ const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole( CheckFloat64HoleParameters(mode, feedback)); } +// TODO(panq): Cache speculative bigint operators. #define SPECULATIVE_BIGINT_BINOP(Name) \ const Operator* SimplifiedOperatorBuilder::Name(BigIntOperationHint hint) { \ return zone()->New>( \ @@ -1687,6 +1692,9 @@ const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole( 1, 1, 1, 1, 0, hint); \ } SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(SPECULATIVE_BIGINT_BINOP) +SPECULATIVE_BIGINT_BINOP(SpeculativeBigIntEqual) +SPECULATIVE_BIGINT_BINOP(SpeculativeBigIntLessThan) +SPECULATIVE_BIGINT_BINOP(SpeculativeBigIntLessThanOrEqual) #undef SPECULATIVE_BIGINT_BINOP const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntNegate( @@ -2010,15 +2018,6 @@ const Operator* SimplifiedOperatorBuilder::SpeculativeNumberEqual( UNREACHABLE(); } -const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntEqual( - BigIntOperationHint hint) { - // TODO(panq): Cache speculative bigint operators. 
- return zone()->New>( - IrOpcode::kSpeculativeBigIntEqual, - Operator::kFoldable | Operator::kNoThrow, "SpeculativeBigIntEqual", 2, 1, - 1, 1, 1, 0, hint); -} - #define ACCESS_OP_LIST(V) \ V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1, 1) \ V(LoadElement, ElementAccess, Operator::kNoWrite, 2, 1, 1) \ diff --git a/src/compiler/simplified-operator.h b/src/compiler/simplified-operator.h index c8d14beee2..d3550af3bb 100644 --- a/src/compiler/simplified-operator.h +++ b/src/compiler/simplified-operator.h @@ -818,6 +818,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final const Operator* BigIntNegate(); const Operator* BigIntEqual(); + const Operator* BigIntLessThan(); + const Operator* BigIntLessThanOrEqual(); const Operator* SpeculativeSafeIntegerAdd(NumberOperationHint hint); const Operator* SpeculativeSafeIntegerSubtract(NumberOperationHint hint); @@ -856,6 +858,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final const FeedbackSource& feedback); const Operator* SpeculativeBigIntEqual(BigIntOperationHint hint); + const Operator* SpeculativeBigIntLessThan(BigIntOperationHint hint); + const Operator* SpeculativeBigIntLessThanOrEqual(BigIntOperationHint hint); const Operator* ReferenceEqual(); const Operator* SameValue(); diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc index 55a97c982e..76ddcefff7 100644 --- a/src/compiler/typer.cc +++ b/src/compiler/typer.cc @@ -418,7 +418,7 @@ class Typer::Visitor : public Reducer { static Type NumberEqualTyper(Type, Type, Typer*); static Type NumberLessThanTyper(Type, Type, Typer*); static Type NumberLessThanOrEqualTyper(Type, Type, Typer*); - static Type BigIntEqualTyper(Type, Type, Typer*); + static Type BigIntCompareTyper(Type, Type, Typer*); static Type ReferenceEqualTyper(Type, Type, Typer*); static Type SameValueTyper(Type, Type, Typer*); static Type SameValueNumbersOnlyTyper(Type, Type, Typer*); @@ -2141,7 +2141,7 @@ Type Typer::Visitor::NumberLessThanOrEqualTyper(Type lhs, Type rhs, Typer* t) 
{ } // static -Type Typer::Visitor::BigIntEqualTyper(Type lhs, Type rhs, Typer* t) { +Type Typer::Visitor::BigIntCompareTyper(Type lhs, Type rhs, Typer* t) { if (lhs.IsNone() || rhs.IsNone()) { return Type::None(); } @@ -2172,13 +2172,17 @@ Type Typer::Visitor::TypeSpeculativeNumberLessThanOrEqual(Node* node) { return TypeBinaryOp(node, NumberLessThanOrEqualTyper); } -Type Typer::Visitor::TypeBigIntEqual(Node* node) { - return TypeBinaryOp(node, BigIntEqualTyper); -} - -Type Typer::Visitor::TypeSpeculativeBigIntEqual(Node* node) { - return TypeBinaryOp(node, BigIntEqualTyper); -} +#define BIGINT_COMPARISON_BINOP(Name) \ + Type Typer::Visitor::Type##Name(Node* node) { \ + return TypeBinaryOp(node, BigIntCompareTyper); \ + } +BIGINT_COMPARISON_BINOP(BigIntEqual) +BIGINT_COMPARISON_BINOP(BigIntLessThan) +BIGINT_COMPARISON_BINOP(BigIntLessThanOrEqual) +BIGINT_COMPARISON_BINOP(SpeculativeBigIntEqual) +BIGINT_COMPARISON_BINOP(SpeculativeBigIntLessThan) +BIGINT_COMPARISON_BINOP(SpeculativeBigIntLessThanOrEqual) +#undef BIGINT_COMPARISON_BINOP Type Typer::Visitor::TypeStringConcat(Node* node) { return Type::String(); } diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc index 52592d641c..ffc7f850fd 100644 --- a/src/compiler/verifier.cc +++ b/src/compiler/verifier.cc @@ -1000,6 +1000,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { CheckTypeIs(node, Type::BigInt()); break; case IrOpcode::kSpeculativeBigIntEqual: + case IrOpcode::kSpeculativeBigIntLessThan: + case IrOpcode::kSpeculativeBigIntLessThanOrEqual: CheckTypeIs(node, Type::Boolean()); break; case IrOpcode::kSpeculativeBigIntNegate: @@ -1018,6 +1020,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { CheckTypeIs(node, Type::BigInt()); break; case IrOpcode::kBigIntEqual: + case IrOpcode::kBigIntLessThan: + case IrOpcode::kBigIntLessThanOrEqual: CheckValueInputIs(node, 0, Type::BigInt()); CheckValueInputIs(node, 1, Type::BigInt()); CheckTypeIs(node, Type::Boolean()); 
diff --git a/test/mjsunit/compiler/bigint-compare.js b/test/mjsunit/compiler/bigint-compare.js new file mode 100644 index 0000000000..65fac50428 --- /dev/null +++ b/test/mjsunit/compiler/bigint-compare.js @@ -0,0 +1,61 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --allow-natives-syntax --turbofan --no-always-turbofan + +function LessThan(x, y) { + return x < y; +} + +function LessThanOrEqual(x, y) { + return x <= y; +} + +function GreaterThan(x, y) { + return x > y; +} + +function GreaterThanOrEqual(x, y) { + return x >= y; +} + +function Test(f, large, lt, eq) { + assertEquals(lt, f(1n, 2n)); + assertEquals(!lt, f(0n, -1n)); + assertEquals(eq, f(-42n, -42n)); + assertEquals(!lt, f(-(2n ** 62n), -(2n ** 63n) + 1n)); + assertEquals(lt, f(-(2n ** 63n) + 1n, (2n ** 63n) - 1n)); + if (large) { + assertEquals(lt, f(2n ** 63n - 1n, 2n ** 63n)); + assertEquals(!lt, f(-(2n ** 63n) + 1n, -(2n ** 63n))); + assertEquals(lt, f(-(13n ** 70n), 13n ** 70n)); // Different signs + assertEquals(!lt, f(13n ** 70n, -(13n ** 70n))); + assertEquals(lt, f(13n ** 80n, 13n ** 90n)); // Different lengths + assertEquals(!lt, f(-(13n ** 70n), -(13n ** 80n))); // Same length + assertEquals(eq, f(13n ** 70n, 13n ** 70n)); + } +} + +function OptAndTest(f, large) { + const lt = f === LessThan || f === LessThanOrEqual; + const eq = f === LessThanOrEqual || f === GreaterThanOrEqual; + %PrepareFunctionForOptimization(f); + Test(f, large, lt, eq); + assertUnoptimized(f); + %OptimizeFunctionOnNextCall(f); + Test(f, large, lt, eq); + assertOptimized(f); +} + +OptAndTest(LessThan, false); +OptAndTest(LessThanOrEqual, false); +OptAndTest(GreaterThan, false); +OptAndTest(GreaterThanOrEqual, false); +if (%Is64Bit()) { + // Should deopt on large bigints and there should not be deopt loops. 
+ OptAndTest(LessThan, true); + OptAndTest(LessThanOrEqual, true); + OptAndTest(GreaterThan, true); + OptAndTest(GreaterThanOrEqual, true); +} From 8d49029b64f41880b71c19e26621bc2f3aca8110 Mon Sep 17 00:00:00 2001 From: Victor Gomes Date: Thu, 22 Dec 2022 14:48:35 +0100 Subject: [PATCH 045/654] [maglev][arm64] Add CheckMaps Bug: v8:7700 Change-Id: I7d4a194be5c488ab90e3684db2e63f42a0a9738a Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4115759 Reviewed-by: Patrick Thier Auto-Submit: Victor Gomes Commit-Queue: Patrick Thier Cr-Commit-Position: refs/heads/main@{#84996} --- src/maglev/arm64/maglev-ir-arm64.cc | 176 +++++++++++++++++++++++++++- src/maglev/maglev-assembler.h | 7 ++ src/maglev/x64/maglev-ir-x64.cc | 5 - 3 files changed, 181 insertions(+), 7 deletions(-) diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index 4009251dc8..8853b87f56 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -130,8 +130,6 @@ UNIMPLEMENTED_NODE(CheckJSArrayBounds) UNIMPLEMENTED_NODE(CheckJSDataViewBounds, element_type_) UNIMPLEMENTED_NODE(CheckJSObjectElementsBounds) UNIMPLEMENTED_NODE(CheckJSTypedArrayBounds, elements_kind_) -UNIMPLEMENTED_NODE(CheckMaps, check_type_) -UNIMPLEMENTED_NODE_WITH_CALL(CheckMapsWithMigration, check_type_) UNIMPLEMENTED_NODE_WITH_CALL(JumpLoopPrologue, loop_depth_, unit_) UNIMPLEMENTED_NODE_WITH_CALL(StoreMap) UNIMPLEMENTED_NODE(StoreDoubleField) @@ -361,6 +359,180 @@ void CheckedTruncateNumberToInt32::GenerateCode(MaglevAssembler* masm, __ bind(&done); } +void CheckMaps::SetValueLocationConstraints() { UseRegister(receiver_input()); } +void CheckMaps::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register object = ToRegister(receiver_input()); + + // TODO(victorgomes): This can happen, because we do not emit an unconditional + // deopt when we intersect the map sets. 
+ if (maps().is_empty()) { + __ RegisterEagerDeopt(eager_deopt_info(), DeoptimizeReason::kWrongMap); + __ B(eager_deopt_info()->deopt_entry_label()); + return; + } + + bool maps_include_heap_number = AnyMapIsHeapNumber(maps()); + + Label done; + if (check_type_ == CheckType::kOmitHeapObjectCheck) { + __ AssertNotSmi(object); + } else { + Condition is_smi = __ CheckSmi(object); + if (maps_include_heap_number) { + // Smis count as matching the HeapNumber map, so we're done. + __ B(&done); + } else { + __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kWrongMap, this); + } + } + + UseScratchRegisterScope temps(masm); + Register object_map = temps.AcquireX(); + Register map = temps.AcquireX(); + __ LoadMap(object_map, object); + size_t map_count = maps().size(); + for (size_t i = 0; i < map_count - 1; ++i) { + Handle map_handle = maps().at(i); + __ Move(map, map_handle); + __ CmpTagged(object_map, map); + __ B(&done, eq); + } + Handle last_map_handle = maps().at(map_count - 1); + __ Move(map, last_map_handle); + __ CmpTagged(object_map, map); + __ EmitEagerDeoptIf(ne, DeoptimizeReason::kWrongMap, this); + __ bind(&done); +} + +int CheckMapsWithMigration::MaxCallStackArgs() const { + DCHECK_EQ(Runtime::FunctionForId(Runtime::kTryMigrateInstance)->nargs, 1); + return 1; +} +void CheckMapsWithMigration::SetValueLocationConstraints() { + UseRegister(receiver_input()); + set_temporaries_needed(1); +} +void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + __ RegisterEagerDeopt(eager_deopt_info(), DeoptimizeReason::kWrongMap); + + // TODO(victorgomes): This can happen, because we do not emit an unconditional + // deopt when we intersect the map sets. 
+ if (maps().is_empty()) { + __ B(eager_deopt_info()->deopt_entry_label()); + return; + } + + Register object = ToRegister(receiver_input()); + + bool maps_include_heap_number = AnyMapIsHeapNumber(maps()); + + ZoneLabelRef done(masm); + if (check_type_ == CheckType::kOmitHeapObjectCheck) { + __ AssertNotSmi(object); + } else { + Condition is_smi = __ CheckSmi(object); + if (maps_include_heap_number) { + // Smis count as matching the HeapNumber map, so we're done. + __ B(*done); + } else { + __ B(eager_deopt_info()->deopt_entry_label(), is_smi); + } + } + + // Use general temporaries to be able to send to deferred code. + Register object_map = general_temporaries().PopFirst(); + __ LoadMap(object_map, object); + + size_t map_count = maps().size(); + for (size_t i = 0; i < map_count; ++i) { + ZoneLabelRef continue_label(masm); + Handle map_handle = maps().at(i); + { + UseScratchRegisterScope temps(masm); + Register map = temps.AcquireX(); + __ Move(map, map_handle); + __ CmpTagged(object_map, map); + } + bool last_map = (i == map_count - 1); + if (map_handle->is_migration_target()) { + __ JumpToDeferredIf( + ne, + [](MaglevAssembler* masm, ZoneLabelRef continue_label, + ZoneLabelRef done, Register object, Register object_map, + int map_index, CheckMapsWithMigration* node) { + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + // If the map is not deprecated, we fail the map check, continue to + // the next one. + __ Ldr(scratch.W(), + FieldMemOperand(object_map, Map::kBitField3Offset)); + __ TestAndBranchIfAllClear(scratch.W(), + Map::Bits3::IsDeprecatedBit::kMask, + *continue_label); + + // Otherwise, try migrating the object. If the migration + // returns Smi zero, then it failed the migration. + Register return_val = Register::no_reg(); + { + RegisterSnapshot register_snapshot = node->register_snapshot(); + // We can eager deopt after the snapshot, so make sure the nodes + // used by the deopt are included in it. 
+ // TODO(leszeks): This is a bit of a footgun -- we likely want the + // snapshot to always include eager deopt input registers. + AddDeoptRegistersToSnapshot(®ister_snapshot, + node->eager_deopt_info()); + SaveRegisterStateForCall save_register_state(masm, + register_snapshot); + + __ Push(object); + __ Move(kContextRegister, masm->native_context().object()); + __ CallRuntime(Runtime::kTryMigrateInstance); + save_register_state.DefineSafepoint(); + + // Make sure the return value is preserved across the live + // register restoring pop all. + return_val = kReturnRegister0; + if (register_snapshot.live_registers.has(return_val)) { + DCHECK(!register_snapshot.live_registers.has(scratch)); + __ Mov(scratch, return_val); + return_val = scratch; + } + } + + // On failure, the returned value is zero + __ Cbz(return_val, *continue_label); + + // The migrated object is returned on success, retry the map check. + __ Move(object, return_val); + __ Move(scratch, node->maps().at(map_index)); + __ CmpTagged(object_map, scratch); + __ B(*done, eq); + __ B(*continue_label); + }, + // If this is the last map to check, we should deopt if we fail. + // This is safe to do, since {eager_deopt_info} is ZoneAllocated. + (last_map ? ZoneLabelRef::UnsafeFromLabelPointer( + eager_deopt_info()->deopt_entry_label()) + : continue_label), + done, object, object_map, i, this); + } else if (last_map) { + // If it is the last map and it is not a migration target, we should deopt + // if the check fails. + __ B(eager_deopt_info()->deopt_entry_label(), ne); + } + + if (!last_map) { + // We don't need to bind the label for the last map. 
+ __ B(*done, eq); + __ bind(*continue_label); + } + } + + __ bind(*done); +} + void CheckNumber::SetValueLocationConstraints() { UseRegister(receiver_input()); } diff --git a/src/maglev/maglev-assembler.h b/src/maglev/maglev-assembler.h index f087c38e7c..7d268dd352 100644 --- a/src/maglev/maglev-assembler.h +++ b/src/maglev/maglev-assembler.h @@ -561,6 +561,13 @@ struct is_iterator_range> : std::true_type {}; } // namespace detail +// General helpers. + +inline bool AnyMapIsHeapNumber(const ZoneHandleSet& maps) { + return std::any_of(maps.begin(), maps.end(), + [](Handle map) { return map->IsHeapNumberMap(); }); +} + } // namespace maglev } // namespace internal } // namespace v8 diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index ba9a7e9156..7b150fe604 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -222,11 +222,6 @@ void AssertInt32::GenerateCode(MaglevAssembler* masm, __ Check(ToCondition(condition_), reason_); } -bool AnyMapIsHeapNumber(const ZoneHandleSet& maps) { - return std::any_of(maps.begin(), maps.end(), - [](Handle map) { return map->IsHeapNumberMap(); }); -} - void CheckMaps::SetValueLocationConstraints() { UseRegister(receiver_input()); } void CheckMaps::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { From 20183a7916a6225d3b97636948b7acb154c9654e Mon Sep 17 00:00:00 2001 From: Victor Gomes Date: Thu, 22 Dec 2022 16:45:38 +0100 Subject: [PATCH 046/654] [maglev][arm64] Fix ToBoolean We should compare only 32 bits for smi when pointer compression is enabled. 
Bug: v8:7700 Change-Id: I6cbc22e57e873cb8ab3c26a4b655a393e258e697 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4122831 Auto-Submit: Victor Gomes Commit-Queue: Patrick Thier Commit-Queue: Victor Gomes Reviewed-by: Patrick Thier Cr-Commit-Position: refs/heads/main@{#84997} --- src/maglev/arm64/maglev-assembler-arm64.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/maglev/arm64/maglev-assembler-arm64.cc b/src/maglev/arm64/maglev-assembler-arm64.cc index 8feb47a9f0..6d65404948 100644 --- a/src/maglev/arm64/maglev-assembler-arm64.cc +++ b/src/maglev/arm64/maglev-assembler-arm64.cc @@ -102,13 +102,13 @@ void MaglevAssembler::ToBoolean(Register value, ZoneLabelRef is_true, Register map = temps.AcquireX(); // Check if {{value}} is Smi. - CheckSmi(value); + Condition is_smi = CheckSmi(value); JumpToDeferredIf( - eq, + is_smi, [](MaglevAssembler* masm, Register value, ZoneLabelRef is_true, ZoneLabelRef is_false) { // Check if {value} is not zero. - __ Cmp(value, Smi::FromInt(0)); + __ CmpTagged(value, Smi::FromInt(0)); __ JumpIf(eq, *is_false); __ Jump(*is_true); }, From 2f2e8c4024a2e989f71c677f98cb673881bac9cd Mon Sep 17 00:00:00 2001 From: pthier Date: Thu, 22 Dec 2022 16:57:23 +0100 Subject: [PATCH 047/654] [maglev] Fix negate 0 with smi feedback With smi feedback, we use int32 operations for arithmetics. When negating 0, we have to fallback to float as we can't represent -0 in int32. We can simply deopt in that case without causing a deopt loop, as a non-smi result will change the feedback to kSignedSmallInputs (from kSignedSmall). 
Bug: chromium:1403102 Change-Id: Ic27c267349a1de6904639e91b1cade2c4f7d1fe2 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4122829 Reviewed-by: Victor Gomes Commit-Queue: Patrick Thier Auto-Submit: Patrick Thier Cr-Commit-Position: refs/heads/main@{#84998} --- src/maglev/arm64/maglev-ir-arm64.cc | 7 +++ src/maglev/x64/maglev-ir-x64.cc | 4 ++ test/mjsunit/maglev/negate.js | 67 +++++++++++++++++++++++++++++ 3 files changed, 78 insertions(+) create mode 100644 test/mjsunit/maglev/negate.js diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index 8853b87f56..d4f2aa2365 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -72,6 +72,13 @@ void Int32NegateWithOverflow::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { Register value = ToRegister(value_input()).W(); Register out = ToRegister(result()).W(); + + // Deopt when result would be -0. + static_assert(Int32NegateWithOverflow::kProperties.can_eager_deopt()); + __ RegisterEagerDeopt(eager_deopt_info(), DeoptimizeReason::kOverflow); + __ RecordComment("-- Jump to eager deopt"); + __ Cbz(value, eager_deopt_info()->deopt_entry_label()); + __ negs(out, value); // Output register must not be a register input into the eager deopt info. DCHECK_REGLIST_EMPTY(RegList{out} & diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index 7b150fe604..7101871b68 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -1697,6 +1697,10 @@ void Int32NegateWithOverflow::SetValueLocationConstraints() { void Int32NegateWithOverflow::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { Register value = ToRegister(value_input()); + // Deopt when the result would be -0. 
+ __ testl(value, value); + __ EmitEagerDeoptIf(zero, DeoptimizeReason::kOverflow, this); + __ negl(value); __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this); } diff --git a/test/mjsunit/maglev/negate.js b/test/mjsunit/maglev/negate.js new file mode 100644 index 0000000000..6e3c2d61c4 --- /dev/null +++ b/test/mjsunit/maglev/negate.js @@ -0,0 +1,67 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --allow-natives-syntax --maglev + +function negate(val) { + return -val; +} + +function test_negate_int32(value, expected) { + // Warmup. + %PrepareFunctionForOptimization(negate); + %ClearFunctionFeedback(negate); + negate(1, -1); + %OptimizeMaglevOnNextCall(negate); + assertEquals(expected, negate(value)); + assertTrue(isMaglevved(negate)); + + %DeoptimizeFunction(negate); + assertEquals(expected, negate(value)); +} + +test_negate_int32(1, -1); +test_negate_int32(-1, 1); +test_negate_int32(42, -42); +test_negate_int32(-42, 42); + +function test_negate_float(value, expected) { + // Warmup. + %PrepareFunctionForOptimization(negate); + %ClearFunctionFeedback(negate); + negate(1.1, -1.1); + %OptimizeMaglevOnNextCall(negate); + assertEquals(expected, negate(value)); + assertTrue(isMaglevved(negate)); + + %DeoptimizeFunction(negate); + assertEquals(expected, negate(value)); +} + +test_negate_float(1.23, -1.23); +test_negate_float(-1.001, 1.001); +test_negate_float(42.42, -42.42); +test_negate_float(-42.42, 42.42); + +const int32_max = Math.pow(2,30)-1; +const int32_min = -Math.pow(2,31); +test_negate_float(int32_max, -int32_max); +test_negate_float(int32_min, -int32_min); + +function test_negate_int32_expect_deopt(value, expected) { + // Warmup. 
+ %PrepareFunctionForOptimization(negate); + %ClearFunctionFeedback(negate); + negate(12, -12); + %OptimizeMaglevOnNextCall(negate); + assertEquals(expected, negate(value)); + assertFalse(isMaglevved(negate)); +} + +test_negate_int32_expect_deopt(0, -0); +test_negate_int32_expect_deopt(-0, 0); +test_negate_int32_expect_deopt(int32_min, -int32_min); +test_negate_int32_expect_deopt(-int32_min, int32_min); +test_negate_int32_expect_deopt(int32_max, -int32_max); +test_negate_int32_expect_deopt(-int32_max, int32_max); From a8f6a56e9ae34f918324dc753cc1456b22ffc51f Mon Sep 17 00:00:00 2001 From: Milad Fa Date: Thu, 22 Dec 2022 10:35:29 -0500 Subject: [PATCH 048/654] PPC/s390: [wasm] Fix printing of wasm-to-js frames Port e17eee4894be67f715a7b2d7f17d8b69724f1cf8 Original Commit Message: After https://crrev.com/c/3859787 those frames would be printed like standard Wasm frames, but in the place of the WasmInstanceObject, they have a WasmApiFunctionRef object instead. So special-case the {WasmToJsFrame::instance()} to load the instance properly. Also special-case the {position()} accessor for imported functions. 
R=clemensb@chromium.org, joransiu@ca.ibm.com, junyan@redhat.com, midawson@redhat.com BUG= LOG=N Change-Id: I370bc4f4f84e1262c4542879b9058f6cf1d9f84e Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4122830 Commit-Queue: Milad Farazmand Reviewed-by: Clemens Backes Cr-Commit-Position: refs/heads/main@{#84999} --- src/compiler/backend/ppc/code-generator-ppc.cc | 4 ++++ src/compiler/backend/s390/code-generator-s390.cc | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/src/compiler/backend/ppc/code-generator-ppc.cc b/src/compiler/backend/ppc/code-generator-ppc.cc index 838a823a96..f634d016eb 100644 --- a/src/compiler/backend/ppc/code-generator-ppc.cc +++ b/src/compiler/backend/ppc/code-generator-ppc.cc @@ -3209,6 +3209,10 @@ void CodeGenerator::AssembleConstructFrame() { if (call_descriptor->IsWasmFunctionCall() || call_descriptor->IsWasmImportWrapper() || call_descriptor->IsWasmCapiFunction()) { + // For import wrappers and C-API functions, this stack slot is only used + // for printing stack traces in V8. Also, it holds a WasmApiFunctionRef + // instead of the instance itself, which is taken care of in the frames + // accessors. __ Push(kWasmInstanceRegister); } if (call_descriptor->IsWasmCapiFunction()) { diff --git a/src/compiler/backend/s390/code-generator-s390.cc b/src/compiler/backend/s390/code-generator-s390.cc index 0141cafd23..da38fab641 100644 --- a/src/compiler/backend/s390/code-generator-s390.cc +++ b/src/compiler/backend/s390/code-generator-s390.cc @@ -3406,6 +3406,10 @@ void CodeGenerator::AssembleConstructFrame() { if (call_descriptor->IsWasmFunctionCall() || call_descriptor->IsWasmImportWrapper() || call_descriptor->IsWasmCapiFunction()) { + // For import wrappers and C-API functions, this stack slot is only used + // for printing stack traces in V8. Also, it holds a WasmApiFunctionRef + // instead of the instance itself, which is taken care of in the frames + // accessors. 
__ Push(kWasmInstanceRegister); } if (call_descriptor->IsWasmCapiFunction()) { From fcda478d890caea6bf04a50e6106682b64cf8d5a Mon Sep 17 00:00:00 2001 From: Toon Verwaest Date: Thu, 22 Dec 2022 17:56:47 +0100 Subject: [PATCH 049/654] [maglev] Always initialize old_type in EnsureType Bug: v8:7700, v8:13611, v8:13612 Change-Id: Ieaf510750f15dc2c96d8c3adba7ceaee7937c1f8 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4120070 Reviewed-by: Patrick Thier Auto-Submit: Toon Verwaest Commit-Queue: Toon Verwaest Cr-Commit-Position: refs/heads/main@{#85000} --- src/maglev/maglev-graph-builder.cc | 22 +++++++++------------- src/maglev/maglev-graph-builder.h | 1 - 2 files changed, 9 insertions(+), 14 deletions(-) diff --git a/src/maglev/maglev-graph-builder.cc b/src/maglev/maglev-graph-builder.cc index 8dea048946..d6384c853a 100644 --- a/src/maglev/maglev-graph-builder.cc +++ b/src/maglev/maglev-graph-builder.cc @@ -1416,13 +1416,18 @@ NodeType StaticTypeForNode(ValueNode* node) { } } // namespace -NodeInfo* MaglevGraphBuilder::CreateInfoIfNot(ValueNode* node, NodeType type) { +bool MaglevGraphBuilder::EnsureType(ValueNode* node, NodeType type, + NodeType* old_type) { NodeType static_type = StaticTypeForNode(node); - if (NodeTypeIs(static_type, type)) return nullptr; + if (NodeTypeIs(static_type, type)) { + if (old_type) *old_type = static_type; + return true; + } NodeInfo* known_info = known_node_aspects().GetOrCreateInfoFor(node); - if (NodeTypeIs(known_info->type, type)) return nullptr; + if (old_type) *old_type = known_info->type; + if (NodeTypeIs(known_info->type, type)) return true; known_info->type = CombineType(known_info->type, static_type); - return known_info; + return false; } bool MaglevGraphBuilder::CheckType(ValueNode* node, NodeType type) { @@ -1432,15 +1437,6 @@ bool MaglevGraphBuilder::CheckType(ValueNode* node, NodeType type) { return NodeTypeIs(it->second.type, type); } -bool MaglevGraphBuilder::EnsureType(ValueNode* node, NodeType type, - 
NodeType* old) { - NodeInfo* known_info = CreateInfoIfNot(node, type); - if (known_info == nullptr) return true; - if (old != nullptr) *old = known_info->type; - known_info->type = CombineType(known_info->type, type); - return false; -} - ValueNode* MaglevGraphBuilder::BuildSmiUntag(ValueNode* node) { if (EnsureType(node, NodeType::kSmi)) { return AddNewNode({node}); diff --git a/src/maglev/maglev-graph-builder.h b/src/maglev/maglev-graph-builder.h index 7bc0565bde..acbfa59080 100644 --- a/src/maglev/maglev-graph-builder.h +++ b/src/maglev/maglev-graph-builder.h @@ -106,7 +106,6 @@ class MaglevGraphBuilder { class CallSpeculationScope; bool CheckType(ValueNode* node, NodeType type); - NodeInfo* CreateInfoIfNot(ValueNode* node, NodeType type); bool EnsureType(ValueNode* node, NodeType type, NodeType* old = nullptr); bool is_toptier() { return v8_flags.lower_tier_as_toptier && !v8_flags.turbofan; From c74a6cd257df45a816811f22af6004af51681898 Mon Sep 17 00:00:00 2001 From: Shu-yu Guo Date: Thu, 22 Dec 2022 17:43:35 +0000 Subject: [PATCH 050/654] Revert "Reland "[static-roots] Enable static roots on supported configurations"" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit b247270178dcfffe9af4389dbb84d1643bfccea4. Reason for revert: Breaking Fuchsia build and blocking roll: https://ci.chromium.org/ui/p/chromium/builders/try/fuchsia-binary-size/186323/overview Original change's description: > Reland "[static-roots] Enable static roots on supported configurations" > > This is a reland of commit c04ca9cc63417d24455704cbee44eb60b79f7af2 > > Original change's description: > > [static-roots] Enable static roots on supported configurations > > > > The static root values are not actually used yet. 
> > > > Bug: v8:13466 > > Change-Id: I85fc99277c31e0dd4350a305040ab25456051046 > > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4101880 > > Reviewed-by: Toon Verwaest > > Commit-Queue: Olivier Flückiger > > Cr-Commit-Position: refs/heads/main@{#84850} > > Bug: v8:13466 > Change-Id: Id65bb5b19df999dfe930a78993e4bf3343d9f996 > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4111641 > Auto-Submit: Olivier Flückiger > Reviewed-by: Toon Verwaest > Commit-Queue: Toon Verwaest > Cr-Commit-Position: refs/heads/main@{#84991} Bug: v8:13466 Change-Id: I9a930649d5c5531f2b8d8472300f6efe3f75ae28 No-Presubmit: true No-Tree-Checks: true No-Try: true Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4122795 Auto-Submit: Shu-yu Guo Owners-Override: Shu-yu Guo Bot-Commit: Rubber Stamper Commit-Queue: Rubber Stamper Cr-Commit-Position: refs/heads/main@{#85001} --- BUILD.gn | 2 +- src/roots/static-roots.h | 753 +------------------------------ src/snapshot/static-roots-gen.cc | 3 - tools/v8heapconst.py | 474 +++++++++---------- 4 files changed, 240 insertions(+), 992 deletions(-) diff --git a/BUILD.gn b/BUILD.gn index 50165520c2..682dfddc4a 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -121,7 +121,7 @@ declare_args() { v8_enable_snapshot_native_code_counters = "" # Use pre-generated static root pointer values from static-roots.h. - v8_enable_static_roots = "" + v8_enable_static_roots = false # Enable code-generation-time checking of types in the CodeStubAssembler. v8_enable_verify_csa = false diff --git a/src/roots/static-roots.h b/src/roots/static-roots.h index eeced43597..eb4aebd879 100644 --- a/src/roots/static-roots.h +++ b/src/roots/static-roots.h @@ -2,766 +2,17 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -// This file is automatically generated by `tools/dev/gen-static-roots.py`. Do -// not edit manually. 
- #ifndef V8_ROOTS_STATIC_ROOTS_H_ #define V8_ROOTS_STATIC_ROOTS_H_ #include "src/common/globals.h" - #if V8_STATIC_ROOTS_BOOL -// Disabling Wasm or Intl invalidates the contents of static-roots.h. -// TODO(olivf): To support static roots for multiple build configurations we -// will need to generate target specific versions of this file. -static_assert(V8_ENABLE_WEBASSEMBLY); -static_assert(V8_INTL_SUPPORT); - namespace v8 { namespace internal { -constexpr static std::array StaticReadOnlyRootsPointerTable = { - 0x3235, // free_space_map - 0x38a5, // one_pointer_filler_map - 0x38cd, // two_pointer_filler_map - 0x7b19, // uninitialized_value - 0x22e1, // undefined_value - 0x22fd, // the_hole_value - 0x22c5, // null_value - 0x3f41, // true_value - 0x3f5d, // false_value - 0x543d, // empty_string - 0x2141, // meta_map - 0x31e5, // byte_array_map - 0x2169, // fixed_array_map - 0x21e1, // fixed_cow_array_map - 0x3995, // hash_table_map - 0x2b7d, // symbol_map - 0x2dad, // one_byte_string_map - 0x2f3d, // one_byte_internalized_string_map - 0x2ab5, // scope_info_map - 0x3d05, // shared_function_info_map - 0x3825, // code_map - 0x384d, // cell_map - 0x387d, // global_property_cell_map - 0x2ba5, // foreign_map - 0x2b55, // heap_number_map - 0x396d, // transition_array_map - 0x302d, // thin_one_byte_string_map - 0x2b2d, // feedback_vector_map - 0x3f19, // empty_scope_info - 0x22a9, // empty_fixed_array - 0x2aa5, // empty_descriptor_array - 0x7b51, // arguments_marker - 0x7bc9, // exception - 0x7b89, // termination_exception - 0x7be5, // optimized_out - 0x7c1d, // stale_register - 0x3bc5, // script_context_table_map - 0x2b05, // closure_feedback_cell_array_map - 0x31bd, // feedback_metadata_map - 0x3b9d, // array_list_map - 0x2d5d, // bigint_map - 0x3bed, // object_boilerplate_description_map - 0x320d, // bytecode_array_map - 0x3d7d, // code_data_container_map - 0x3c15, // coverage_info_map - 0x3195, // fixed_double_array_map - 0x3a85, // global_dictionary_map - 0x3945, // 
many_closures_cell_map - 0x2bcd, // mega_dom_handler_map - 0x2add, // module_info_map - 0x3a35, // name_dictionary_map - 0x38f5, // no_closures_cell_map - 0x3aad, // number_dictionary_map - 0x391d, // one_closure_cell_map - 0x39bd, // ordered_hash_map_map - 0x39e5, // ordered_hash_set_map - 0x3afd, // name_to_index_hash_table_map - 0x3b25, // registered_symbol_table_map - 0x3a0d, // ordered_name_dictionary_map - 0x3cdd, // preparse_data_map - 0x325d, // property_array_map - 0x3c3d, // accessor_info_map - 0x3c65, // side_effect_call_handler_info_map - 0x3c8d, // side_effect_free_call_handler_info_map - 0x3cb5, // next_call_side_effect_free_call_handler_info_map - 0x3ad5, // simple_number_dictionary_map - 0x3285, // small_ordered_hash_map_map - 0x32ad, // small_ordered_hash_set_map - 0x32d5, // small_ordered_name_dictionary_map - 0x3d2d, // source_text_module_map - 0x3a5d, // swiss_name_dictionary_map - 0x3d55, // synthetic_module_map - 0x3da5, // wasm_api_function_ref_map - 0x3dcd, // wasm_capi_function_data_map - 0x3df5, // wasm_exported_function_data_map - 0x3e1d, // wasm_internal_function_map - 0x3e45, // wasm_js_function_data_map - 0x3e6d, // wasm_resume_data_map - 0x3e95, // wasm_type_info_map - 0x3ebd, // wasm_continuation_object_map - 0x2191, // weak_fixed_array_map - 0x21b9, // weak_array_list_map - 0x3b75, // ephemeron_hash_table_map - 0x3b4d, // embedder_data_array_map - 0x3ee5, // weak_cell_map - 0x2d85, // string_map - 0x2dfd, // cons_one_byte_string_map - 0x2dd5, // cons_string_map - 0x3005, // thin_string_map - 0x2e25, // sliced_string_map - 0x2e4d, // sliced_one_byte_string_map - 0x2e75, // external_string_map - 0x2e9d, // external_one_byte_string_map - 0x2ec5, // uncached_external_string_map - 0x2f15, // internalized_string_map - 0x2f65, // external_internalized_string_map - 0x2f8d, // external_one_byte_internalized_string_map - 0x2fb5, // uncached_external_internalized_string_map - 0x2fdd, // uncached_external_one_byte_internalized_string_map - 
0x2eed, // uncached_external_one_byte_string_map - 0x307d, // shared_one_byte_string_map - 0x3055, // shared_string_map - 0x30cd, // shared_external_one_byte_string_map - 0x30a5, // shared_external_string_map - 0x311d, // shared_uncached_external_one_byte_string_map - 0x30f5, // shared_uncached_external_string_map - 0x316d, // shared_thin_one_byte_string_map - 0x3145, // shared_thin_string_map - 0x2231, // undefined_map - 0x2281, // the_hole_map - 0x2259, // null_map - 0x2bf5, // boolean_map - 0x2c1d, // uninitialized_map - 0x2c45, // arguments_marker_map - 0x2c6d, // exception_map - 0x2c95, // termination_exception_map - 0x2cbd, // optimized_out_map - 0x2ce5, // stale_register_map - 0x2d0d, // self_reference_marker_map - 0x2d35, // basic_block_counters_marker_map - 0x2a99, // empty_enum_cache - 0x3f81, // empty_property_array - 0x3f79, // empty_byte_array - 0x3f29, // empty_object_boilerplate_description - 0x3f35, // empty_array_boilerplate_description - 0x3f89, // empty_closure_feedback_cell_array - 0x820d, // empty_slow_element_dictionary - 0x8231, // empty_ordered_hash_map - 0x8245, // empty_ordered_hash_set - 0x829d, // empty_feedback_metadata - 0x81c9, // empty_property_dictionary - 0x8259, // empty_ordered_property_dictionary - 0x827d, // empty_swiss_property_dictionary - 0x3f91, // noop_interceptor_info - 0x3f0d, // empty_array_list - 0x22b1, // empty_weak_fixed_array - 0x22b9, // empty_weak_array_list - 0x3875, // invalid_prototype_validity_cell - 0x3fc5, // nan_value - 0x3fd1, // hole_nan_value - 0x3fdd, // infinity_value - 0x3fb9, // minus_zero_value - 0x3fe9, // minus_infinity_value - 0x3ff5, // max_safe_integer - 0x4001, // max_uint_32 - 0x400d, // smi_min_value - 0x4019, // smi_max_value_plus_one - 0x4035, // single_character_string_table - 0x7c55, // self_reference_marker - 0x7c95, // basic_block_counters_marker - 0x831d, // off_heap_trampoline_relocation_info - 0x22e1, // trampoline_trivial_code_data_container - 0x22e1, // 
trampoline_promise_rejection_code_data_container - 0x82a9, // global_this_binding_scope_info - 0x82c9, // empty_function_scope_info - 0x82ed, // native_scope_info - 0x8305, // shadow_realm_scope_info - 0x81f1, // empty_symbol_table - 0x4025, // hash_seed - 0x5449, // adoptText_string - 0x5461, // approximatelySign_string - 0x5481, // baseName_string - 0x5495, // accounting_string - 0x54ad, // breakType_string - 0x54c5, // calendars_string - 0x54dd, // cardinal_string - 0x54f1, // caseFirst_string - 0x5509, // ceil_string - 0x5519, // compare_string - 0x552d, // collation_string - 0x5545, // collations_string - 0x555d, // compact_string - 0x5571, // compactDisplay_string - 0x558d, // currency_string - 0x55a1, // currencyDisplay_string - 0x55bd, // currencySign_string - 0x55d5, // dateStyle_string - 0x55ed, // dateTimeField_string - 0x5609, // dayPeriod_string - 0x5621, // daysDisplay_string - 0x5639, // decimal_string - 0x564d, // dialect_string - 0x5661, // digital_string - 0x5675, // direction_string - 0x568d, // endRange_string - 0x56a1, // engineering_string - 0x56b9, // exceptZero_string - 0x56d1, // expand_string - 0x56e5, // exponentInteger_string - 0x5701, // exponentMinusSign_string - 0x5721, // exponentSeparator_string - 0x5741, // fallback_string - 0x5755, // first_string - 0x5769, // firstDay_string - 0x577d, // floor_string - 0x5791, // format_string - 0x57a5, // fraction_string - 0x57b9, // fractionalDigits_string - 0x57d5, // fractionalSecond_string - 0x57f1, // full_string - 0x5801, // granularity_string - 0x5819, // grapheme_string - 0x582d, // group_string - 0x5841, // h11_string - 0x5851, // h12_string - 0x5861, // h23_string - 0x5871, // h24_string - 0x5881, // halfCeil_string - 0x5895, // halfEven_string - 0x58a9, // halfExpand_string - 0x58c1, // halfFloor_string - 0x58d9, // halfTrunc_string - 0x58f1, // hour12_string - 0x5905, // hourCycle_string - 0x591d, // hourCycles_string - 0x5935, // hoursDisplay_string - 0x594d, // ideo_string - 
0x595d, // ignorePunctuation_string - 0x597d, // Invalid_Date_string - 0x5995, // integer_string - 0x59a9, // isWordLike_string - 0x59c1, // kana_string - 0x59d1, // language_string - 0x59e5, // languageDisplay_string - 0x5a01, // lessPrecision_string - 0x5a1d, // letter_string - 0x5a31, // list_string - 0x5a41, // literal_string - 0x5a55, // locale_string - 0x5a69, // loose_string - 0x5a7d, // lower_string - 0x5a91, // ltr_string - 0x5aa1, // maximumFractionDigits_string - 0x5ac5, // maximumSignificantDigits_string - 0x5ae9, // microsecondsDisplay_string - 0x5b09, // millisecondsDisplay_string - 0x5b29, // min2_string - 0x5b39, // minimalDays_string - 0x5b51, // minimumFractionDigits_string - 0x5b75, // minimumIntegerDigits_string - 0x5b95, // minimumSignificantDigits_string - 0x5bb9, // minus_0 - 0x5bc9, // minusSign_string - 0x5be1, // minutesDisplay_string - 0x5bfd, // monthsDisplay_string - 0x5c19, // morePrecision_string - 0x5c35, // nan_string - 0x5c45, // nanosecondsDisplay_string - 0x5c65, // narrowSymbol_string - 0x5c7d, // negative_string - 0x5c91, // never_string - 0x5ca5, // none_string - 0x5cb5, // notation_string - 0x5cc9, // normal_string - 0x5cdd, // numberingSystem_string - 0x5cf9, // numberingSystems_string - 0x5d15, // numeric_string - 0x5d29, // ordinal_string - 0x5d3d, // percentSign_string - 0x5d55, // plusSign_string - 0x5d69, // quarter_string - 0x5d7d, // region_string - 0x5d91, // relatedYear_string - 0x5da9, // roundingMode_string - 0x5dc1, // roundingPriority_string - 0x5ddd, // rtl_string - 0x5ded, // scientific_string - 0x5e05, // secondsDisplay_string - 0x5e21, // segment_string - 0x5e35, // SegmentIterator_string - 0x5e51, // Segments_string - 0x5e65, // sensitivity_string - 0x5e7d, // sep_string - 0x5e8d, // shared_string - 0x5ea1, // signDisplay_string - 0x5eb9, // standard_string - 0x5ecd, // startRange_string - 0x5ee5, // strict_string - 0x5ef9, // stripIfInteger_string - 0x5f15, // style_string - 0x5f29, // term_string - 
0x5f39, // textInfo_string - 0x5f4d, // timeStyle_string - 0x5f65, // timeZones_string - 0x5f7d, // timeZoneName_string - 0x5f95, // trailingZeroDisplay_string - 0x5fb5, // trunc_string - 0x5fc9, // two_digit_string - 0x5fdd, // type_string - 0x5fed, // unknown_string - 0x6001, // upper_string - 0x6015, // usage_string - 0x6029, // useGrouping_string - 0x6041, // unitDisplay_string - 0x6059, // weekday_string - 0x606d, // weekend_string - 0x6081, // weeksDisplay_string - 0x6099, // weekInfo_string - 0x60ad, // yearName_string - 0x60c1, // yearsDisplay_string - 0x60d9, // add_string - 0x60e9, // AggregateError_string - 0x6105, // always_string - 0x6119, // anonymous_function_string - 0x6139, // anonymous_string - 0x6151, // apply_string - 0x6165, // Arguments_string - 0x617d, // arguments_string - 0x6195, // arguments_to_string - 0x61b5, // Array_string - 0x61c9, // array_to_string - 0x61e5, // ArrayBuffer_string - 0x61fd, // ArrayIterator_string - 0x6219, // as_string - 0x6229, // assert_string - 0x623d, // async_string - 0x6251, // AtomicsCondition_string - 0x6271, // AtomicsMutex_string - 0x628d, // auto_string - 0x629d, // await_string - 0x62b1, // BigInt_string - 0x62c5, // bigint_string - 0x62d9, // BigInt64Array_string - 0x62f5, // BigUint64Array_string - 0x6311, // bind_string - 0x6321, // blank_string - 0x6335, // Boolean_string - 0x6349, // boolean_string - 0x635d, // boolean_to_string - 0x6379, // bound__string - 0x638d, // buffer_string - 0x63a1, // byte_length_string - 0x63b9, // byte_offset_string - 0x63d1, // CompileError_string - 0x63e9, // calendar_string - 0x63fd, // callee_string - 0x6411, // caller_string - 0x6425, // cause_string - 0x6439, // character_string - 0x6451, // closure_string - 0x6469, // code_string - 0x6479, // column_string - 0x648d, // computed_string - 0x64a5, // configurable_string - 0x64bd, // conjunction_string - 0x64d5, // console_string - 0x64e9, // constrain_string - 0x6501, // construct_string - 0x6519, // current_string - 
0x652d, // Date_string - 0x653d, // date_to_string - 0x6559, // dateAdd_string - 0x656d, // dateFromFields_string - 0x6589, // dateUntil_string - 0x65a1, // day_string - 0x65b1, // dayOfWeek_string - 0x65c9, // dayOfYear_string - 0x65e1, // days_string - 0x65f1, // daysInMonth_string - 0x6609, // daysInWeek_string - 0x6621, // daysInYear_string - 0x6639, // default_string - 0x664d, // defineProperty_string - 0x6669, // deleteProperty_string - 0x6685, // disjunction_string - 0x669d, // done_string - 0x66ad, // dot_brand_string - 0x66c1, // dot_catch_string - 0x66d5, // dot_default_string - 0x66e9, // dot_for_string - 0x66f9, // dot_generator_object_string - 0x6719, // dot_home_object_string - 0x6731, // dot_new_target_string - 0x6749, // dot_result_string - 0x675d, // dot_repl_result_string - 0x6775, // dot_static_home_object_string - 0x471d, // dot_string - 0x6795, // dot_switch_tag_string - 0x67ad, // dotAll_string - 0x67c1, // Error_string - 0x67d5, // EvalError_string - 0x67ed, // enumerable_string - 0x6805, // element_string - 0x6819, // epochMicroseconds_string - 0x6839, // epochMilliseconds_string - 0x6859, // epochNanoseconds_string - 0x6875, // epochSeconds_string - 0x688d, // era_string - 0x689d, // eraYear_string - 0x68b1, // errors_string - 0x68c5, // error_to_string - 0x68e1, // eval_string - 0x68f1, // exception_string - 0x6909, // exec_string - 0x6919, // false_string - 0x692d, // fields_string - 0x6941, // FinalizationRegistry_string - 0x6961, // flags_string - 0x6975, // Float32Array_string - 0x698d, // Float64Array_string - 0x69a5, // fractionalSecondDigits_string - 0x69c9, // from_string - 0x69d9, // Function_string - 0x69ed, // function_native_code_string - 0x6a19, // function_string - 0x6a2d, // function_to_string - 0x6a4d, // Generator_string - 0x6a65, // get_space_string - 0x6a75, // get_string - 0x6a85, // getOffsetNanosecondsFor_string - 0x6aa9, // getOwnPropertyDescriptor_string - 0x6acd, // getPossibleInstantsFor_string - 0x6af1, // 
getPrototypeOf_string - 0x6b0d, // global_string - 0x6b21, // globalThis_string - 0x6b39, // groups_string - 0x6b4d, // growable_string - 0x6b61, // has_string - 0x6b71, // hasIndices_string - 0x6b89, // hour_string - 0x6b99, // hours_string - 0x6bad, // hoursInDay_string - 0x6bc5, // ignoreCase_string - 0x6bdd, // id_string - 0x6bed, // illegal_access_string - 0x6c09, // illegal_argument_string - 0x6c25, // inLeapYear_string - 0x6c3d, // index_string - 0x6c51, // indices_string - 0x6c65, // Infinity_string - 0x6c79, // infinity_string - 0x6c8d, // input_string - 0x6ca1, // Int16Array_string - 0x6cb9, // Int32Array_string - 0x6cd1, // Int8Array_string - 0x6ce9, // isExtensible_string - 0x6d01, // iso8601_string - 0x6d15, // isoDay_string - 0x6d29, // isoHour_string - 0x6d3d, // isoMicrosecond_string - 0x6d59, // isoMillisecond_string - 0x6d75, // isoMinute_string - 0x6d8d, // isoMonth_string - 0x6da1, // isoNanosecond_string - 0x6dbd, // isoSecond_string - 0x6dd5, // isoYear_string - 0x6de9, // jsMemoryEstimate_string - 0x6e05, // jsMemoryRange_string - 0x6e21, // keys_string - 0x6e31, // largestUnit_string - 0x6e49, // lastIndex_string - 0x6e61, // length_string - 0x6e75, // let_string - 0x6e85, // line_string - 0x6e95, // linear_string - 0x6ea9, // LinkError_string - 0x6ec1, // long_string - 0x6ed1, // Map_string - 0x6ee1, // MapIterator_string - 0x6ef9, // max_byte_length_string - 0x6f15, // medium_string - 0x6f29, // mergeFields_string - 0x6f41, // message_string - 0x6f55, // meta_string - 0x6f65, // minus_Infinity_string - 0x6f7d, // microsecond_string - 0x6f95, // microseconds_string - 0x6fad, // millisecond_string - 0x6fc5, // milliseconds_string - 0x6fdd, // minute_string - 0x6ff1, // minutes_string - 0x7005, // Module_string - 0x7019, // month_string - 0x702d, // monthDayFromFields_string - 0x704d, // months_string - 0x7061, // monthsInYear_string - 0x7079, // monthCode_string - 0x7091, // multiline_string - 0x70a9, // name_string - 0x70b9, // NaN_string - 
0x70c9, // nanosecond_string - 0x70e1, // nanoseconds_string - 0x70f9, // narrow_string - 0x710d, // native_string - 0x6731, // new_target_string - 0x7121, // NFC_string - 0x7131, // NFD_string - 0x7141, // NFKC_string - 0x7151, // NFKD_string - 0x7161, // not_equal_string - 0x7179, // null_string - 0x7189, // null_to_string - 0x71a5, // Number_string - 0x71b9, // number_string - 0x71cd, // number_to_string - 0x71e9, // Object_string - 0x71fd, // object_string - 0x7211, // object_to_string - 0x722d, // of_string - 0x723d, // offset_string - 0x7251, // offsetNanoseconds_string - 0x7271, // ok_string - 0x474d, // one_string - 0x7281, // other_string - 0x7295, // overflow_string - 0x72a9, // ownKeys_string - 0x72bd, // percent_string - 0x72d1, // plainDate_string - 0x72e9, // plainTime_string - 0x7301, // position_string - 0x7315, // preventExtensions_string - 0x7335, // private_constructor_string - 0x734d, // Promise_string - 0x7361, // proto_string - 0x7379, // prototype_string - 0x7391, // proxy_string - 0x73a5, // Proxy_string - 0x73b9, // query_colon_string - 0x73c9, // RangeError_string - 0x73e1, // raw_json_string - 0x73f5, // raw_string - 0x7405, // ReferenceError_string - 0x7421, // ReflectGet_string - 0x7439, // ReflectHas_string - 0x7451, // RegExp_string - 0x7465, // regexp_to_string - 0x7481, // reject_string - 0x7495, // relativeTo_string - 0x74ad, // resizable_string - 0x74c5, // ResizableArrayBuffer_string - 0x74e5, // return_string - 0x74f9, // revoke_string - 0x750d, // roundingIncrement_string - 0x752d, // RuntimeError_string - 0x7545, // WebAssemblyException_string - 0x7569, // Script_string - 0x757d, // script_string - 0x7591, // second_string - 0x75a5, // seconds_string - 0x75b9, // short_string - 0x75cd, // Set_string - 0x75dd, // sentence_string - 0x75f1, // set_space_string - 0x7601, // set_string - 0x7611, // SetIterator_string - 0x7629, // setPrototypeOf_string - 0x7645, // ShadowRealm_string - 0x765d, // SharedArray_string - 0x7675, // 
SharedArrayBuffer_string - 0x7695, // SharedStruct_string - 0x76ad, // sign_string - 0x76bd, // smallestUnit_string - 0x76d5, // source_string - 0x76e9, // sourceText_string - 0x7701, // stack_string - 0x7715, // stackTraceLimit_string - 0x7731, // sticky_string - 0x7745, // String_string - 0x7759, // string_string - 0x776d, // string_to_string - 0x7789, // Symbol_iterator_string - 0x77a5, // symbol_species_string - 0x77c1, // Symbol_species_string - 0x77dd, // Symbol_string - 0x77f1, // symbol_string - 0x7805, // SyntaxError_string - 0x781d, // target_string - 0x7831, // this_function_string - 0x784d, // this_string - 0x785d, // throw_string - 0x7871, // timed_out_string - 0x7889, // timeZone_string - 0x789d, // toJSON_string - 0x78b1, // toString_string - 0x78c5, // true_string - 0x78d5, // total_string - 0x78e9, // TypeError_string - 0x7901, // Uint16Array_string - 0x7919, // Uint32Array_string - 0x7931, // Uint8Array_string - 0x7949, // Uint8ClampedArray_string - 0x7969, // undefined_string - 0x7981, // undefined_to_string - 0x79a1, // unicode_string - 0x79b5, // unicodeSets_string - 0x79cd, // unit_string - 0x79dd, // URIError_string - 0x79f1, // UTC_string - 0x7a01, // value_string - 0x7a15, // valueOf_string - 0x7a29, // WeakMap_string - 0x7a3d, // WeakRef_string - 0x7a51, // WeakSet_string - 0x7a65, // week_string - 0x7a75, // weeks_string - 0x7a89, // weekOfYear_string - 0x7aa1, // word_string - 0x7ab1, // writable_string - 0x7ac5, // yearMonthFromFields_string - 0x7ae5, // year_string - 0x7af5, // years_string - 0x473d, // zero_string - 0x7cd9, // array_buffer_wasm_memory_symbol - 0x7ce9, // call_site_info_symbol - 0x7cf9, // console_context_id_symbol - 0x7d09, // console_context_name_symbol - 0x7d19, // class_fields_symbol - 0x7d29, // class_positions_symbol - 0x7d39, // elements_transition_symbol - 0x7d49, // error_end_pos_symbol - 0x7d59, // error_script_symbol - 0x7d69, // error_stack_symbol - 0x7d79, // error_start_pos_symbol - 0x7d89, // 
frozen_symbol - 0x7d99, // interpreter_trampoline_symbol - 0x7da9, // mega_dom_symbol - 0x7db9, // megamorphic_symbol - 0x7dc9, // native_context_index_symbol - 0x7dd9, // nonextensible_symbol - 0x7de9, // not_mapped_symbol - 0x7df9, // promise_debug_marker_symbol - 0x7e09, // promise_debug_message_symbol - 0x7e19, // promise_forwarding_handler_symbol - 0x7e29, // promise_handled_by_symbol - 0x7e39, // promise_awaited_by_symbol - 0x7e49, // regexp_result_names_symbol - 0x7e59, // regexp_result_regexp_input_symbol - 0x7e69, // regexp_result_regexp_last_index_symbol - 0x7e79, // sealed_symbol - 0x7e89, // strict_function_transition_symbol - 0x7e99, // template_literal_function_literal_id_symbol - 0x7ea9, // template_literal_slot_id_symbol - 0x7eb9, // wasm_exception_tag_symbol - 0x7ec9, // wasm_exception_values_symbol - 0x7ed9, // wasm_uncatchable_symbol - 0x7ee9, // wasm_wrapped_object_symbol - 0x7ef9, // wasm_debug_proxy_cache_symbol - 0x7f09, // wasm_debug_proxy_names_symbol - 0x7f19, // uninitialized_symbol - 0x7f29, // async_iterator_symbol - 0x7f59, // intl_fallback_symbol - 0x7f91, // match_all_symbol - 0x7fbd, // match_symbol - 0x7fe5, // replace_symbol - 0x8011, // search_symbol - 0x803d, // split_symbol - 0x8065, // to_primitive_symbol - 0x8095, // unscopables_symbol - 0x80c5, // has_instance_symbol - 0x80f5, // to_string_tag_symbol - 0x2319, // promise_fulfill_reaction_job_task_map - 0x2341, // promise_reject_reaction_job_task_map - 0x2369, // callable_task_map - 0x2391, // callback_task_map - 0x23b9, // promise_resolve_thenable_job_task_map - 0x23e1, // function_template_info_map - 0x2409, // object_template_info_map - 0x2431, // access_check_info_map - 0x2459, // accessor_pair_map - 0x2481, // aliased_arguments_entry_map - 0x24a9, // allocation_memento_map - 0x24d1, // array_boilerplate_description_map - 0x24f9, // asm_wasm_data_map - 0x2521, // async_generator_request_map - 0x2549, // break_point_map - 0x2571, // break_point_info_map - 0x2599, // 
call_site_info_map - 0x25c1, // class_positions_map - 0x25e9, // debug_info_map - 0x2611, // enum_cache_map - 0x2639, // error_stack_data_map - 0x2661, // function_template_rare_data_map - 0x2689, // interceptor_info_map - 0x26b1, // interpreter_data_map - 0x26d9, // module_request_map - 0x2701, // promise_capability_map - 0x2729, // promise_on_stack_map - 0x2751, // promise_reaction_map - 0x2779, // property_descriptor_object_map - 0x27a1, // prototype_info_map - 0x27c9, // regexp_boilerplate_description_map - 0x27f1, // script_map - 0x2819, // script_or_module_map - 0x2841, // module_info_entry_map - 0x2869, // stack_frame_info_map - 0x2891, // template_object_description_map - 0x28b9, // tuple2_map - 0x28e1, // wasm_exception_tag_map - 0x2909, // wasm_indirect_function_table_map - 0x370d, // sloppy_arguments_elements_map - 0x2209, // descriptor_array_map - 0x3735, // strong_descriptor_array_map - 0x32fd, // uncompiled_data_without_preparse_data_map - 0x3325, // uncompiled_data_with_preparse_data_map - 0x334d, // uncompiled_data_without_preparse_data_with_job_map - 0x3375, // uncompiled_data_with_preparse_data_and_job_map - 0x339d, // on_heap_basic_block_profiler_data_map - 0x33c5, // turbofan_bitset_type_map - 0x33ed, // turbofan_union_type_map - 0x3415, // turbofan_range_type_map - 0x343d, // turbofan_heap_constant_type_map - 0x3465, // turbofan_other_number_constant_type_map - 0x348d, // turboshaft_word32type_map - 0x34b5, // turboshaft_word32range_type_map - 0x375d, // turboshaft_word32set_type_map - 0x34dd, // turboshaft_word64type_map - 0x3505, // turboshaft_word64range_type_map - 0x3785, // turboshaft_word64set_type_map - 0x352d, // turboshaft_float64type_map - 0x3555, // turboshaft_float64range_type_map - 0x37ad, // turboshaft_float64set_type_map - 0x357d, // internal_class_map - 0x35a5, // smi_pair_map - 0x35cd, // smi_box_map - 0x35f5, // exported_sub_class_base_map - 0x361d, // exported_sub_class_map - 0x3645, // abstract_internal_class_subclass1_map - 
0x366d, // abstract_internal_class_subclass2_map - 0x37d5, // internal_class_with_smi_elements_map - 0x37fd, // internal_class_with_struct_elements_map - 0x3695, // exported_sub_class2_map - 0x36bd, // sort_state_map - 0x36e5, // wasm_string_view_iter_map - 0x2931, // allocation_site_map - 0x2959, // allocation_site_without_weaknext_map - 0x814d, // constructor_string - 0x8165, // next_string - 0x8175, // resolve_string - 0x8189, // then_string - 0x8199, // iterator_symbol - 0x81a9, // species_symbol - 0x81b9, // is_concat_spreadable_symbol - 0x2981, // load_handler1_map - 0x29a9, // load_handler2_map - 0x29d1, // load_handler3_map - 0x29f9, // store_handler0_map - 0x2a21, // store_handler1_map - 0x2a49, // store_handler2_map - 0x2a71, // store_handler3_map -}; +// TODO(olivf, v8:13466): Enable and add static roots +constexpr static std::array StaticReadOnlyRootsPointerTable = {}; } // namespace internal } // namespace v8 diff --git a/src/snapshot/static-roots-gen.cc b/src/snapshot/static-roots-gen.cc index 12d9265ad3..772b340b53 100644 --- a/src/snapshot/static-roots-gen.cc +++ b/src/snapshot/static-roots-gen.cc @@ -30,9 +30,6 @@ void StaticRootsTableGen::write(Isolate* isolate, const char* file) { "that can be\n" << "// found in the LICENSE file.\n" << "\n" - << "// This file is automatically generated by " - "`tools/dev/gen-static-roots.py`. Do\n// not edit manually.\n" - << "\n" << "#ifndef V8_ROOTS_STATIC_ROOTS_H_\n" << "#define V8_ROOTS_STATIC_ROOTS_H_\n" << "\n" diff --git a/tools/v8heapconst.py b/tools/v8heapconst.py index 5f65d0b450..19e3330fd5 100644 --- a/tools/v8heapconst.py +++ b/tools/v8heapconst.py @@ -295,250 +295,250 @@ INSTANCE_TYPES = { # List of known V8 maps. 
KNOWN_MAPS = { ("read_only_space", 0x02141): (255, "MetaMap"), - ("read_only_space", 0x02169): (175, "FixedArrayMap"), - ("read_only_space", 0x02191): (240, "WeakFixedArrayMap"), + ("read_only_space", 0x02169): (131, "NullMap"), + ("read_only_space", 0x02191): (237, "StrongDescriptorArrayMap"), ("read_only_space", 0x021b9): (273, "WeakArrayListMap"), - ("read_only_space", 0x021e1): (175, "FixedCOWArrayMap"), - ("read_only_space", 0x02209): (236, "DescriptorArrayMap"), - ("read_only_space", 0x02231): (131, "UndefinedMap"), - ("read_only_space", 0x02259): (131, "NullMap"), - ("read_only_space", 0x02281): (131, "TheHoleMap"), - ("read_only_space", 0x02319): (132, "PromiseFulfillReactionJobTaskMap"), - ("read_only_space", 0x02341): (133, "PromiseRejectReactionJobTaskMap"), - ("read_only_space", 0x02369): (134, "CallableTaskMap"), - ("read_only_space", 0x02391): (135, "CallbackTaskMap"), - ("read_only_space", 0x023b9): (136, "PromiseResolveThenableJobTaskMap"), - ("read_only_space", 0x023e1): (139, "FunctionTemplateInfoMap"), - ("read_only_space", 0x02409): (140, "ObjectTemplateInfoMap"), - ("read_only_space", 0x02431): (141, "AccessCheckInfoMap"), - ("read_only_space", 0x02459): (142, "AccessorPairMap"), - ("read_only_space", 0x02481): (143, "AliasedArgumentsEntryMap"), - ("read_only_space", 0x024a9): (144, "AllocationMementoMap"), - ("read_only_space", 0x024d1): (146, "ArrayBoilerplateDescriptionMap"), - ("read_only_space", 0x024f9): (147, "AsmWasmDataMap"), - ("read_only_space", 0x02521): (148, "AsyncGeneratorRequestMap"), - ("read_only_space", 0x02549): (149, "BreakPointMap"), - ("read_only_space", 0x02571): (150, "BreakPointInfoMap"), - ("read_only_space", 0x02599): (151, "CallSiteInfoMap"), - ("read_only_space", 0x025c1): (152, "ClassPositionsMap"), - ("read_only_space", 0x025e9): (153, "DebugInfoMap"), - ("read_only_space", 0x02611): (154, "EnumCacheMap"), - ("read_only_space", 0x02639): (155, "ErrorStackDataMap"), - ("read_only_space", 0x02661): (157, 
"FunctionTemplateRareDataMap"), - ("read_only_space", 0x02689): (158, "InterceptorInfoMap"), - ("read_only_space", 0x026b1): (159, "InterpreterDataMap"), - ("read_only_space", 0x026d9): (160, "ModuleRequestMap"), - ("read_only_space", 0x02701): (161, "PromiseCapabilityMap"), - ("read_only_space", 0x02729): (162, "PromiseOnStackMap"), - ("read_only_space", 0x02751): (163, "PromiseReactionMap"), - ("read_only_space", 0x02779): (164, "PropertyDescriptorObjectMap"), - ("read_only_space", 0x027a1): (165, "PrototypeInfoMap"), - ("read_only_space", 0x027c9): (166, "RegExpBoilerplateDescriptionMap"), - ("read_only_space", 0x027f1): (167, "ScriptMap"), - ("read_only_space", 0x02819): (168, "ScriptOrModuleMap"), - ("read_only_space", 0x02841): (169, "SourceTextModuleInfoEntryMap"), - ("read_only_space", 0x02869): (170, "StackFrameInfoMap"), - ("read_only_space", 0x02891): (171, "TemplateObjectDescriptionMap"), - ("read_only_space", 0x028b9): (172, "Tuple2Map"), - ("read_only_space", 0x028e1): (173, "WasmExceptionTagMap"), - ("read_only_space", 0x02909): (174, "WasmIndirectFunctionTableMap"), - ("read_only_space", 0x02931): (145, "AllocationSiteWithWeakNextMap"), - ("read_only_space", 0x02959): (145, "AllocationSiteWithoutWeakNextMap"), - ("read_only_space", 0x02981): (137, "LoadHandler1Map"), - ("read_only_space", 0x029a9): (137, "LoadHandler2Map"), - ("read_only_space", 0x029d1): (137, "LoadHandler3Map"), - ("read_only_space", 0x029f9): (138, "StoreHandler0Map"), - ("read_only_space", 0x02a21): (138, "StoreHandler1Map"), - ("read_only_space", 0x02a49): (138, "StoreHandler2Map"), - ("read_only_space", 0x02a71): (138, "StoreHandler3Map"), - ("read_only_space", 0x02ab5): (261, "ScopeInfoMap"), - ("read_only_space", 0x02add): (175, "ModuleInfoMap"), - ("read_only_space", 0x02b05): (187, "ClosureFeedbackCellArrayMap"), - ("read_only_space", 0x02b2d): (250, "FeedbackVectorMap"), - ("read_only_space", 0x02b55): (130, "HeapNumberMap"), - ("read_only_space", 0x02b7d): (128, 
"SymbolMap"), - ("read_only_space", 0x02ba5): (204, "ForeignMap"), - ("read_only_space", 0x02bcd): (256, "MegaDomHandlerMap"), - ("read_only_space", 0x02bf5): (131, "BooleanMap"), - ("read_only_space", 0x02c1d): (131, "UninitializedMap"), - ("read_only_space", 0x02c45): (131, "ArgumentsMarkerMap"), - ("read_only_space", 0x02c6d): (131, "ExceptionMap"), - ("read_only_space", 0x02c95): (131, "TerminationExceptionMap"), - ("read_only_space", 0x02cbd): (131, "OptimizedOutMap"), - ("read_only_space", 0x02ce5): (131, "StaleRegisterMap"), - ("read_only_space", 0x02d0d): (131, "SelfReferenceMarkerMap"), - ("read_only_space", 0x02d35): (131, "BasicBlockCountersMarkerMap"), - ("read_only_space", 0x02d5d): (129, "BigIntMap"), - ("read_only_space", 0x02d85): (32, "StringMap"), - ("read_only_space", 0x02dad): (40, "OneByteStringMap"), - ("read_only_space", 0x02dd5): (33, "ConsStringMap"), - ("read_only_space", 0x02dfd): (41, "ConsOneByteStringMap"), - ("read_only_space", 0x02e25): (35, "SlicedStringMap"), - ("read_only_space", 0x02e4d): (43, "SlicedOneByteStringMap"), - ("read_only_space", 0x02e75): (34, "ExternalStringMap"), - ("read_only_space", 0x02e9d): (42, "ExternalOneByteStringMap"), - ("read_only_space", 0x02ec5): (50, "UncachedExternalStringMap"), - ("read_only_space", 0x02eed): (58, "UncachedExternalOneByteStringMap"), - ("read_only_space", 0x02f15): (0, "InternalizedStringMap"), - ("read_only_space", 0x02f3d): (8, "OneByteInternalizedStringMap"), - ("read_only_space", 0x02f65): (2, "ExternalInternalizedStringMap"), - ("read_only_space", 0x02f8d): (10, "ExternalOneByteInternalizedStringMap"), - ("read_only_space", 0x02fb5): (18, "UncachedExternalInternalizedStringMap"), - ("read_only_space", 0x02fdd): (26, "UncachedExternalOneByteInternalizedStringMap"), - ("read_only_space", 0x03005): (37, "ThinStringMap"), - ("read_only_space", 0x0302d): (45, "ThinOneByteStringMap"), - ("read_only_space", 0x03055): (96, "SharedStringMap"), - ("read_only_space", 0x0307d): (104, 
"SharedOneByteStringMap"), - ("read_only_space", 0x030a5): (98, "SharedExternalStringMap"), - ("read_only_space", 0x030cd): (106, "SharedExternalOneByteStringMap"), - ("read_only_space", 0x030f5): (114, "SharedUncachedExternalStringMap"), - ("read_only_space", 0x0311d): (122, "SharedUncachedExternalOneByteStringMap"), - ("read_only_space", 0x03145): (101, "SharedThinStringMap"), - ("read_only_space", 0x0316d): (109, "SharedThinOneByteStringMap"), - ("read_only_space", 0x03195): (192, "FixedDoubleArrayMap"), - ("read_only_space", 0x031bd): (249, "FeedbackMetadataArrayMap"), - ("read_only_space", 0x031e5): (190, "ByteArrayMap"), - ("read_only_space", 0x0320d): (191, "BytecodeArrayMap"), - ("read_only_space", 0x03235): (252, "FreeSpaceMap"), - ("read_only_space", 0x0325d): (259, "PropertyArrayMap"), - ("read_only_space", 0x03285): (231, "SmallOrderedHashMapMap"), - ("read_only_space", 0x032ad): (232, "SmallOrderedHashSetMap"), - ("read_only_space", 0x032d5): (233, "SmallOrderedNameDictionaryMap"), - ("read_only_space", 0x032fd): (222, "UncompiledDataWithoutPreparseDataMap"), - ("read_only_space", 0x03325): (220, "UncompiledDataWithPreparseDataMap"), - ("read_only_space", 0x0334d): (223, "UncompiledDataWithoutPreparseDataWithJobMap"), - ("read_only_space", 0x03375): (221, "UncompiledDataWithPreparseDataAndJobMap"), - ("read_only_space", 0x0339d): (257, "OnHeapBasicBlockProfilerDataMap"), - ("read_only_space", 0x033c5): (215, "TurbofanBitsetTypeMap"), - ("read_only_space", 0x033ed): (219, "TurbofanUnionTypeMap"), - ("read_only_space", 0x03415): (218, "TurbofanRangeTypeMap"), - ("read_only_space", 0x0343d): (216, "TurbofanHeapConstantTypeMap"), - ("read_only_space", 0x03465): (217, "TurbofanOtherNumberConstantTypeMap"), - ("read_only_space", 0x0348d): (198, "TurboshaftWord32TypeMap"), - ("read_only_space", 0x034b5): (199, "TurboshaftWord32RangeTypeMap"), - ("read_only_space", 0x034dd): (201, "TurboshaftWord64TypeMap"), - ("read_only_space", 0x03505): (202, 
"TurboshaftWord64RangeTypeMap"), - ("read_only_space", 0x0352d): (195, "TurboshaftFloat64TypeMap"), - ("read_only_space", 0x03555): (196, "TurboshaftFloat64RangeTypeMap"), - ("read_only_space", 0x0357d): (253, "InternalClassMap"), - ("read_only_space", 0x035a5): (264, "SmiPairMap"), - ("read_only_space", 0x035cd): (263, "SmiBoxMap"), - ("read_only_space", 0x035f5): (228, "ExportedSubClassBaseMap"), - ("read_only_space", 0x0361d): (229, "ExportedSubClassMap"), - ("read_only_space", 0x03645): (234, "AbstractInternalClassSubclass1Map"), - ("read_only_space", 0x0366d): (235, "AbstractInternalClassSubclass2Map"), - ("read_only_space", 0x03695): (230, "ExportedSubClass2Map"), - ("read_only_space", 0x036bd): (265, "SortStateMap"), - ("read_only_space", 0x036e5): (271, "WasmStringViewIterMap"), - ("read_only_space", 0x0370d): (194, "SloppyArgumentsElementsMap"), - ("read_only_space", 0x03735): (237, "StrongDescriptorArrayMap"), - ("read_only_space", 0x0375d): (200, "TurboshaftWord32SetTypeMap"), - ("read_only_space", 0x03785): (203, "TurboshaftWord64SetTypeMap"), - ("read_only_space", 0x037ad): (197, "TurboshaftFloat64SetTypeMap"), - ("read_only_space", 0x037d5): (193, "InternalClassWithSmiElementsMap"), - ("read_only_space", 0x037fd): (254, "InternalClassWithStructElementsMap"), - ("read_only_space", 0x03825): (245, "CodeMap"), - ("read_only_space", 0x0384d): (244, "CellMap"), - ("read_only_space", 0x0387d): (260, "GlobalPropertyCellMap"), - ("read_only_space", 0x038a5): (251, "OnePointerFillerMap"), - ("read_only_space", 0x038cd): (251, "TwoPointerFillerMap"), - ("read_only_space", 0x038f5): (156, "NoClosuresCellMap"), - ("read_only_space", 0x0391d): (156, "OneClosureCellMap"), - ("read_only_space", 0x03945): (156, "ManyClosuresCellMap"), - ("read_only_space", 0x0396d): (241, "TransitionArrayMap"), - ("read_only_space", 0x03995): (176, "HashTableMap"), - ("read_only_space", 0x039bd): (182, "OrderedHashMapMap"), - ("read_only_space", 0x039e5): (183, "OrderedHashSetMap"), 
- ("read_only_space", 0x03a0d): (184, "OrderedNameDictionaryMap"), - ("read_only_space", 0x03a35): (179, "NameDictionaryMap"), - ("read_only_space", 0x03a5d): (266, "SwissNameDictionaryMap"), - ("read_only_space", 0x03a85): (178, "GlobalDictionaryMap"), - ("read_only_space", 0x03aad): (181, "NumberDictionaryMap"), - ("read_only_space", 0x03ad5): (186, "SimpleNumberDictionaryMap"), - ("read_only_space", 0x03afd): (180, "NameToIndexHashTableMap"), - ("read_only_space", 0x03b25): (185, "RegisteredSymbolTableMap"), - ("read_only_space", 0x03b4d): (248, "EmbedderDataArrayMap"), - ("read_only_space", 0x03b75): (177, "EphemeronHashTableMap"), - ("read_only_space", 0x03b9d): (175, "ArrayListMap"), - ("read_only_space", 0x03bc5): (189, "ScriptContextTableMap"), - ("read_only_space", 0x03bed): (188, "ObjectBoilerplateDescriptionMap"), - ("read_only_space", 0x03c15): (247, "CoverageInfoMap"), - ("read_only_space", 0x03c3d): (242, "AccessorInfoMap"), - ("read_only_space", 0x03c65): (243, "SideEffectCallHandlerInfoMap"), - ("read_only_space", 0x03c8d): (243, "SideEffectFreeCallHandlerInfoMap"), - ("read_only_space", 0x03cb5): (243, "NextCallSideEffectFreeCallHandlerInfoMap"), - ("read_only_space", 0x03cdd): (258, "PreparseDataMap"), - ("read_only_space", 0x03d05): (262, "SharedFunctionInfoMap"), - ("read_only_space", 0x03d2d): (238, "SourceTextModuleMap"), - ("read_only_space", 0x03d55): (239, "SyntheticModuleMap"), - ("read_only_space", 0x03d7d): (246, "CodeDataContainerMap"), - ("read_only_space", 0x03da5): (267, "WasmApiFunctionRefMap"), - ("read_only_space", 0x03dcd): (225, "WasmCapiFunctionDataMap"), - ("read_only_space", 0x03df5): (226, "WasmExportedFunctionDataMap"), - ("read_only_space", 0x03e1d): (269, "WasmInternalFunctionMap"), - ("read_only_space", 0x03e45): (227, "WasmJSFunctionDataMap"), - ("read_only_space", 0x03e6d): (270, "WasmResumeDataMap"), - ("read_only_space", 0x03e95): (272, "WasmTypeInfoMap"), - ("read_only_space", 0x03ebd): (268, 
"WasmContinuationObjectMap"), - ("read_only_space", 0x03ee5): (274, "WeakCellMap"), + ("read_only_space", 0x021fd): (154, "EnumCacheMap"), + ("read_only_space", 0x02231): (175, "FixedArrayMap"), + ("read_only_space", 0x0227d): (8, "OneByteInternalizedStringMap"), + ("read_only_space", 0x022c9): (252, "FreeSpaceMap"), + ("read_only_space", 0x022f1): (251, "OnePointerFillerMap"), + ("read_only_space", 0x02319): (251, "TwoPointerFillerMap"), + ("read_only_space", 0x02341): (131, "UninitializedMap"), + ("read_only_space", 0x023b9): (131, "UndefinedMap"), + ("read_only_space", 0x023fd): (130, "HeapNumberMap"), + ("read_only_space", 0x02431): (131, "TheHoleMap"), + ("read_only_space", 0x02491): (131, "BooleanMap"), + ("read_only_space", 0x02535): (190, "ByteArrayMap"), + ("read_only_space", 0x0255d): (175, "FixedCOWArrayMap"), + ("read_only_space", 0x02585): (176, "HashTableMap"), + ("read_only_space", 0x025ad): (128, "SymbolMap"), + ("read_only_space", 0x025d5): (40, "OneByteStringMap"), + ("read_only_space", 0x025fd): (261, "ScopeInfoMap"), + ("read_only_space", 0x02625): (262, "SharedFunctionInfoMap"), + ("read_only_space", 0x0264d): (245, "CodeMap"), + ("read_only_space", 0x02675): (244, "CellMap"), + ("read_only_space", 0x0269d): (260, "GlobalPropertyCellMap"), + ("read_only_space", 0x026c5): (204, "ForeignMap"), + ("read_only_space", 0x026ed): (241, "TransitionArrayMap"), + ("read_only_space", 0x02715): (45, "ThinOneByteStringMap"), + ("read_only_space", 0x0273d): (250, "FeedbackVectorMap"), + ("read_only_space", 0x02775): (131, "ArgumentsMarkerMap"), + ("read_only_space", 0x027d5): (131, "ExceptionMap"), + ("read_only_space", 0x02831): (131, "TerminationExceptionMap"), + ("read_only_space", 0x02899): (131, "OptimizedOutMap"), + ("read_only_space", 0x028f9): (131, "StaleRegisterMap"), + ("read_only_space", 0x02959): (189, "ScriptContextTableMap"), + ("read_only_space", 0x02981): (187, "ClosureFeedbackCellArrayMap"), + ("read_only_space", 0x029a9): (249, 
"FeedbackMetadataArrayMap"), + ("read_only_space", 0x029d1): (175, "ArrayListMap"), + ("read_only_space", 0x029f9): (129, "BigIntMap"), + ("read_only_space", 0x02a21): (188, "ObjectBoilerplateDescriptionMap"), + ("read_only_space", 0x02a49): (191, "BytecodeArrayMap"), + ("read_only_space", 0x02a71): (246, "CodeDataContainerMap"), + ("read_only_space", 0x02a99): (247, "CoverageInfoMap"), + ("read_only_space", 0x02ac1): (192, "FixedDoubleArrayMap"), + ("read_only_space", 0x02ae9): (178, "GlobalDictionaryMap"), + ("read_only_space", 0x02b11): (156, "ManyClosuresCellMap"), + ("read_only_space", 0x02b39): (256, "MegaDomHandlerMap"), + ("read_only_space", 0x02b61): (175, "ModuleInfoMap"), + ("read_only_space", 0x02b89): (179, "NameDictionaryMap"), + ("read_only_space", 0x02bb1): (156, "NoClosuresCellMap"), + ("read_only_space", 0x02bd9): (181, "NumberDictionaryMap"), + ("read_only_space", 0x02c01): (156, "OneClosureCellMap"), + ("read_only_space", 0x02c29): (182, "OrderedHashMapMap"), + ("read_only_space", 0x02c51): (183, "OrderedHashSetMap"), + ("read_only_space", 0x02c79): (180, "NameToIndexHashTableMap"), + ("read_only_space", 0x02ca1): (185, "RegisteredSymbolTableMap"), + ("read_only_space", 0x02cc9): (184, "OrderedNameDictionaryMap"), + ("read_only_space", 0x02cf1): (258, "PreparseDataMap"), + ("read_only_space", 0x02d19): (259, "PropertyArrayMap"), + ("read_only_space", 0x02d41): (242, "AccessorInfoMap"), + ("read_only_space", 0x02d69): (243, "SideEffectCallHandlerInfoMap"), + ("read_only_space", 0x02d91): (243, "SideEffectFreeCallHandlerInfoMap"), + ("read_only_space", 0x02db9): (243, "NextCallSideEffectFreeCallHandlerInfoMap"), + ("read_only_space", 0x02de1): (186, "SimpleNumberDictionaryMap"), + ("read_only_space", 0x02e09): (231, "SmallOrderedHashMapMap"), + ("read_only_space", 0x02e31): (232, "SmallOrderedHashSetMap"), + ("read_only_space", 0x02e59): (233, "SmallOrderedNameDictionaryMap"), + ("read_only_space", 0x02e81): (238, "SourceTextModuleMap"), + 
("read_only_space", 0x02ea9): (266, "SwissNameDictionaryMap"), + ("read_only_space", 0x02ed1): (239, "SyntheticModuleMap"), + ("read_only_space", 0x02ef9): (267, "WasmApiFunctionRefMap"), + ("read_only_space", 0x02f21): (225, "WasmCapiFunctionDataMap"), + ("read_only_space", 0x02f49): (226, "WasmExportedFunctionDataMap"), + ("read_only_space", 0x02f71): (269, "WasmInternalFunctionMap"), + ("read_only_space", 0x02f99): (227, "WasmJSFunctionDataMap"), + ("read_only_space", 0x02fc1): (270, "WasmResumeDataMap"), + ("read_only_space", 0x02fe9): (272, "WasmTypeInfoMap"), + ("read_only_space", 0x03011): (268, "WasmContinuationObjectMap"), + ("read_only_space", 0x03039): (240, "WeakFixedArrayMap"), + ("read_only_space", 0x03061): (177, "EphemeronHashTableMap"), + ("read_only_space", 0x03089): (248, "EmbedderDataArrayMap"), + ("read_only_space", 0x030b1): (274, "WeakCellMap"), + ("read_only_space", 0x030d9): (32, "StringMap"), + ("read_only_space", 0x03101): (41, "ConsOneByteStringMap"), + ("read_only_space", 0x03129): (33, "ConsStringMap"), + ("read_only_space", 0x03151): (37, "ThinStringMap"), + ("read_only_space", 0x03179): (35, "SlicedStringMap"), + ("read_only_space", 0x031a1): (43, "SlicedOneByteStringMap"), + ("read_only_space", 0x031c9): (34, "ExternalStringMap"), + ("read_only_space", 0x031f1): (42, "ExternalOneByteStringMap"), + ("read_only_space", 0x03219): (50, "UncachedExternalStringMap"), + ("read_only_space", 0x03241): (0, "InternalizedStringMap"), + ("read_only_space", 0x03269): (2, "ExternalInternalizedStringMap"), + ("read_only_space", 0x03291): (10, "ExternalOneByteInternalizedStringMap"), + ("read_only_space", 0x032b9): (18, "UncachedExternalInternalizedStringMap"), + ("read_only_space", 0x032e1): (26, "UncachedExternalOneByteInternalizedStringMap"), + ("read_only_space", 0x03309): (58, "UncachedExternalOneByteStringMap"), + ("read_only_space", 0x03331): (104, "SharedOneByteStringMap"), + ("read_only_space", 0x03359): (96, "SharedStringMap"), + 
("read_only_space", 0x03381): (106, "SharedExternalOneByteStringMap"), + ("read_only_space", 0x033a9): (98, "SharedExternalStringMap"), + ("read_only_space", 0x033d1): (122, "SharedUncachedExternalOneByteStringMap"), + ("read_only_space", 0x033f9): (114, "SharedUncachedExternalStringMap"), + ("read_only_space", 0x03421): (109, "SharedThinOneByteStringMap"), + ("read_only_space", 0x03449): (101, "SharedThinStringMap"), + ("read_only_space", 0x03471): (131, "SelfReferenceMarkerMap"), + ("read_only_space", 0x03499): (131, "BasicBlockCountersMarkerMap"), + ("read_only_space", 0x034dd): (146, "ArrayBoilerplateDescriptionMap"), + ("read_only_space", 0x035dd): (158, "InterceptorInfoMap"), + ("read_only_space", 0x07655): (132, "PromiseFulfillReactionJobTaskMap"), + ("read_only_space", 0x0767d): (133, "PromiseRejectReactionJobTaskMap"), + ("read_only_space", 0x076a5): (134, "CallableTaskMap"), + ("read_only_space", 0x076cd): (135, "CallbackTaskMap"), + ("read_only_space", 0x076f5): (136, "PromiseResolveThenableJobTaskMap"), + ("read_only_space", 0x0771d): (139, "FunctionTemplateInfoMap"), + ("read_only_space", 0x07745): (140, "ObjectTemplateInfoMap"), + ("read_only_space", 0x0776d): (141, "AccessCheckInfoMap"), + ("read_only_space", 0x07795): (142, "AccessorPairMap"), + ("read_only_space", 0x077bd): (143, "AliasedArgumentsEntryMap"), + ("read_only_space", 0x077e5): (144, "AllocationMementoMap"), + ("read_only_space", 0x0780d): (147, "AsmWasmDataMap"), + ("read_only_space", 0x07835): (148, "AsyncGeneratorRequestMap"), + ("read_only_space", 0x0785d): (149, "BreakPointMap"), + ("read_only_space", 0x07885): (150, "BreakPointInfoMap"), + ("read_only_space", 0x078ad): (151, "CallSiteInfoMap"), + ("read_only_space", 0x078d5): (152, "ClassPositionsMap"), + ("read_only_space", 0x078fd): (153, "DebugInfoMap"), + ("read_only_space", 0x07925): (155, "ErrorStackDataMap"), + ("read_only_space", 0x0794d): (157, "FunctionTemplateRareDataMap"), + ("read_only_space", 0x07975): (159, 
"InterpreterDataMap"), + ("read_only_space", 0x0799d): (160, "ModuleRequestMap"), + ("read_only_space", 0x079c5): (161, "PromiseCapabilityMap"), + ("read_only_space", 0x079ed): (162, "PromiseOnStackMap"), + ("read_only_space", 0x07a15): (163, "PromiseReactionMap"), + ("read_only_space", 0x07a3d): (164, "PropertyDescriptorObjectMap"), + ("read_only_space", 0x07a65): (165, "PrototypeInfoMap"), + ("read_only_space", 0x07a8d): (166, "RegExpBoilerplateDescriptionMap"), + ("read_only_space", 0x07ab5): (167, "ScriptMap"), + ("read_only_space", 0x07add): (168, "ScriptOrModuleMap"), + ("read_only_space", 0x07b05): (169, "SourceTextModuleInfoEntryMap"), + ("read_only_space", 0x07b2d): (170, "StackFrameInfoMap"), + ("read_only_space", 0x07b55): (171, "TemplateObjectDescriptionMap"), + ("read_only_space", 0x07b7d): (172, "Tuple2Map"), + ("read_only_space", 0x07ba5): (173, "WasmExceptionTagMap"), + ("read_only_space", 0x07bcd): (174, "WasmIndirectFunctionTableMap"), + ("read_only_space", 0x07bf5): (194, "SloppyArgumentsElementsMap"), + ("read_only_space", 0x07c1d): (236, "DescriptorArrayMap"), + ("read_only_space", 0x07c45): (222, "UncompiledDataWithoutPreparseDataMap"), + ("read_only_space", 0x07c6d): (220, "UncompiledDataWithPreparseDataMap"), + ("read_only_space", 0x07c95): (223, "UncompiledDataWithoutPreparseDataWithJobMap"), + ("read_only_space", 0x07cbd): (221, "UncompiledDataWithPreparseDataAndJobMap"), + ("read_only_space", 0x07ce5): (257, "OnHeapBasicBlockProfilerDataMap"), + ("read_only_space", 0x07d0d): (215, "TurbofanBitsetTypeMap"), + ("read_only_space", 0x07d35): (219, "TurbofanUnionTypeMap"), + ("read_only_space", 0x07d5d): (218, "TurbofanRangeTypeMap"), + ("read_only_space", 0x07d85): (216, "TurbofanHeapConstantTypeMap"), + ("read_only_space", 0x07dad): (217, "TurbofanOtherNumberConstantTypeMap"), + ("read_only_space", 0x07dd5): (198, "TurboshaftWord32TypeMap"), + ("read_only_space", 0x07dfd): (199, "TurboshaftWord32RangeTypeMap"), + ("read_only_space", 
0x07e25): (200, "TurboshaftWord32SetTypeMap"), + ("read_only_space", 0x07e4d): (201, "TurboshaftWord64TypeMap"), + ("read_only_space", 0x07e75): (202, "TurboshaftWord64RangeTypeMap"), + ("read_only_space", 0x07e9d): (203, "TurboshaftWord64SetTypeMap"), + ("read_only_space", 0x07ec5): (195, "TurboshaftFloat64TypeMap"), + ("read_only_space", 0x07eed): (196, "TurboshaftFloat64RangeTypeMap"), + ("read_only_space", 0x07f15): (197, "TurboshaftFloat64SetTypeMap"), + ("read_only_space", 0x07f3d): (253, "InternalClassMap"), + ("read_only_space", 0x07f65): (264, "SmiPairMap"), + ("read_only_space", 0x07f8d): (263, "SmiBoxMap"), + ("read_only_space", 0x07fb5): (228, "ExportedSubClassBaseMap"), + ("read_only_space", 0x07fdd): (229, "ExportedSubClassMap"), + ("read_only_space", 0x08005): (234, "AbstractInternalClassSubclass1Map"), + ("read_only_space", 0x0802d): (235, "AbstractInternalClassSubclass2Map"), + ("read_only_space", 0x08055): (193, "InternalClassWithSmiElementsMap"), + ("read_only_space", 0x0807d): (254, "InternalClassWithStructElementsMap"), + ("read_only_space", 0x080a5): (230, "ExportedSubClass2Map"), + ("read_only_space", 0x080cd): (265, "SortStateMap"), + ("read_only_space", 0x080f5): (271, "WasmStringViewIterMap"), + ("read_only_space", 0x0811d): (145, "AllocationSiteWithWeakNextMap"), + ("read_only_space", 0x08145): (145, "AllocationSiteWithoutWeakNextMap"), + ("read_only_space", 0x08211): (137, "LoadHandler1Map"), + ("read_only_space", 0x08239): (137, "LoadHandler2Map"), + ("read_only_space", 0x08261): (137, "LoadHandler3Map"), + ("read_only_space", 0x08289): (138, "StoreHandler0Map"), + ("read_only_space", 0x082b1): (138, "StoreHandler1Map"), + ("read_only_space", 0x082d9): (138, "StoreHandler2Map"), + ("read_only_space", 0x08301): (138, "StoreHandler3Map"), ("old_space", 0x0438d): (2116, "ExternalMap"), ("old_space", 0x043b5): (2120, "JSMessageObjectMap"), } # List of known V8 objects. 
KNOWN_OBJECTS = { - ("read_only_space", 0x022a9): "EmptyFixedArray", - ("read_only_space", 0x022b1): "EmptyWeakFixedArray", - ("read_only_space", 0x022b9): "EmptyWeakArrayList", - ("read_only_space", 0x022c5): "NullValue", - ("read_only_space", 0x022e1): "UndefinedValue", - ("read_only_space", 0x022fd): "TheHoleValue", - ("read_only_space", 0x02a99): "EmptyEnumCache", - ("read_only_space", 0x02aa5): "EmptyDescriptorArray", - ("read_only_space", 0x03875): "InvalidPrototypeValidityCell", - ("read_only_space", 0x03f0d): "EmptyArrayList", - ("read_only_space", 0x03f19): "EmptyScopeInfo", - ("read_only_space", 0x03f29): "EmptyObjectBoilerplateDescription", - ("read_only_space", 0x03f35): "EmptyArrayBoilerplateDescription", - ("read_only_space", 0x03f41): "TrueValue", - ("read_only_space", 0x03f5d): "FalseValue", - ("read_only_space", 0x03f79): "EmptyByteArray", - ("read_only_space", 0x03f81): "EmptyPropertyArray", - ("read_only_space", 0x03f89): "EmptyClosureFeedbackCellArray", - ("read_only_space", 0x03f91): "NoOpInterceptorInfo", - ("read_only_space", 0x03fb9): "MinusZeroValue", - ("read_only_space", 0x03fc5): "NanValue", - ("read_only_space", 0x03fd1): "HoleNanValue", - ("read_only_space", 0x03fdd): "InfinityValue", - ("read_only_space", 0x03fe9): "MinusInfinityValue", - ("read_only_space", 0x03ff5): "MaxSafeInteger", - ("read_only_space", 0x04001): "MaxUInt32", - ("read_only_space", 0x0400d): "SmiMinValue", - ("read_only_space", 0x04019): "SmiMaxValuePlusOne", - ("read_only_space", 0x04025): "HashSeed", - ("read_only_space", 0x04035): "SingleCharacterStringTable", - ("read_only_space", 0x0543d): "empty_string", - ("read_only_space", 0x07b19): "UninitializedValue", - ("read_only_space", 0x07b51): "ArgumentsMarker", - ("read_only_space", 0x07b89): "TerminationException", - ("read_only_space", 0x07bc9): "Exception", - ("read_only_space", 0x07be5): "OptimizedOut", - ("read_only_space", 0x07c1d): "StaleRegister", - ("read_only_space", 0x07c55): "SelfReferenceMarker", - 
("read_only_space", 0x07c95): "BasicBlockCountersMarker", - ("read_only_space", 0x081c9): "EmptyPropertyDictionary", - ("read_only_space", 0x081f1): "EmptySymbolTable", - ("read_only_space", 0x0820d): "EmptySlowElementDictionary", - ("read_only_space", 0x08231): "EmptyOrderedHashMap", - ("read_only_space", 0x08245): "EmptyOrderedHashSet", - ("read_only_space", 0x08259): "EmptyOrderedPropertyDictionary", - ("read_only_space", 0x0827d): "EmptySwissPropertyDictionary", - ("read_only_space", 0x0829d): "EmptyFeedbackMetadata", - ("read_only_space", 0x082a9): "GlobalThisBindingScopeInfo", - ("read_only_space", 0x082c9): "EmptyFunctionScopeInfo", - ("read_only_space", 0x082ed): "NativeScopeInfo", - ("read_only_space", 0x08305): "ShadowRealmScopeInfo", - ("read_only_space", 0x0831d): "OffHeapTrampolineRelocationInfo", + ("read_only_space", 0x021e1): "EmptyWeakArrayList", + ("read_only_space", 0x021ed): "EmptyDescriptorArray", + ("read_only_space", 0x02225): "EmptyEnumCache", + ("read_only_space", 0x02259): "EmptyFixedArray", + ("read_only_space", 0x02261): "NullValue", + ("read_only_space", 0x02369): "UninitializedValue", + ("read_only_space", 0x023e1): "UndefinedValue", + ("read_only_space", 0x02425): "NanValue", + ("read_only_space", 0x02459): "TheHoleValue", + ("read_only_space", 0x02485): "HoleNanValue", + ("read_only_space", 0x024b9): "TrueValue", + ("read_only_space", 0x024f9): "FalseValue", + ("read_only_space", 0x02529): "empty_string", + ("read_only_space", 0x02765): "EmptyScopeInfo", + ("read_only_space", 0x0279d): "ArgumentsMarker", + ("read_only_space", 0x027fd): "Exception", + ("read_only_space", 0x02859): "TerminationException", + ("read_only_space", 0x028c1): "OptimizedOut", + ("read_only_space", 0x02921): "StaleRegister", + ("read_only_space", 0x034c1): "EmptyPropertyArray", + ("read_only_space", 0x034c9): "EmptyByteArray", + ("read_only_space", 0x034d1): "EmptyObjectBoilerplateDescription", + ("read_only_space", 0x03505): 
"EmptyArrayBoilerplateDescription", + ("read_only_space", 0x03511): "EmptyClosureFeedbackCellArray", + ("read_only_space", 0x03519): "EmptySlowElementDictionary", + ("read_only_space", 0x0353d): "EmptyOrderedHashMap", + ("read_only_space", 0x03551): "EmptyOrderedHashSet", + ("read_only_space", 0x03565): "EmptyFeedbackMetadata", + ("read_only_space", 0x03571): "EmptyPropertyDictionary", + ("read_only_space", 0x03599): "EmptyOrderedPropertyDictionary", + ("read_only_space", 0x035b1): "EmptySwissPropertyDictionary", + ("read_only_space", 0x03605): "NoOpInterceptorInfo", + ("read_only_space", 0x0362d): "EmptyArrayList", + ("read_only_space", 0x03639): "EmptyWeakFixedArray", + ("read_only_space", 0x03641): "InvalidPrototypeValidityCell", + ("read_only_space", 0x03649): "InfinityValue", + ("read_only_space", 0x03655): "MinusZeroValue", + ("read_only_space", 0x03661): "MinusInfinityValue", + ("read_only_space", 0x0366d): "MaxSafeInteger", + ("read_only_space", 0x03679): "MaxUInt32", + ("read_only_space", 0x03685): "SmiMinValue", + ("read_only_space", 0x03691): "SmiMaxValuePlusOne", + ("read_only_space", 0x0369d): "SingleCharacterStringTable", + ("read_only_space", 0x04aa5): "SelfReferenceMarker", + ("read_only_space", 0x04ae5): "BasicBlockCountersMarker", + ("read_only_space", 0x04b29): "OffHeapTrampolineRelocationInfo", + ("read_only_space", 0x04b35): "GlobalThisBindingScopeInfo", + ("read_only_space", 0x04b65): "EmptyFunctionScopeInfo", + ("read_only_space", 0x04b89): "NativeScopeInfo", + ("read_only_space", 0x04ba1): "ShadowRealmScopeInfo", + ("read_only_space", 0x04bb9): "EmptySymbolTable", + ("read_only_space", 0x04bd5): "HashSeed", ("old_space", 0x0423d): "ArgumentsIteratorAccessor", ("old_space", 0x04255): "ArrayLengthAccessor", ("old_space", 0x0426d): "BoundFunctionLengthAccessor", From 5c7fca4a61ef96a8834adb98aabe29825c6bb3a9 Mon Sep 17 00:00:00 2001 From: Shu-yu Guo Date: Thu, 22 Dec 2022 09:19:20 -0800 Subject: [PATCH 051/654] [string-iswellformed] Implement 
String#{is,to}WellFormed Bug: v8:13557 Change-Id: I6fa772c70d8307eca047fd839058279ce244f0e0 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4118066 Reviewed-by: Adam Klein Commit-Queue: Shu-yu Guo Cr-Commit-Position: refs/heads/main@{#85002} --- BUILD.bazel | 2 + BUILD.gn | 2 + src/builtins/builtins-string-gen.cc | 55 +++++++++++++++++ src/builtins/builtins-string-gen.h | 4 ++ src/builtins/string-iswellformed.tq | 45 ++++++++++++++ src/builtins/string-towellformed.tq | 59 +++++++++++++++++++ src/codegen/external-reference.cc | 19 ++++++ src/codegen/external-reference.h | 2 + src/flags/flag-definitions.h | 3 +- src/init/bootstrapper.cc | 12 ++++ src/objects/string-inl.h | 16 +++++ src/objects/string.h | 4 ++ src/runtime/runtime-strings.cc | 26 ++++++++ src/runtime/runtime.h | 4 +- src/strings/unicode.cc | 31 ++++++++++ src/strings/unicode.h | 6 +- .../string-iswellformed-external-uncached.js | 42 +++++++++++++ test/test262/test262.status | 14 ----- test/test262/testcfg.py | 2 + tools/v8heapconst.py | 48 +++++++-------- 20 files changed, 355 insertions(+), 41 deletions(-) create mode 100644 src/builtins/string-iswellformed.tq create mode 100644 src/builtins/string-towellformed.tq create mode 100644 test/mjsunit/harmony/string-iswellformed-external-uncached.js diff --git a/BUILD.bazel b/BUILD.bazel index bf367db3a9..8a8c0cad50 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -861,6 +861,7 @@ filegroup( "src/builtins/string-html.tq", "src/builtins/string-includes.tq", "src/builtins/string-indexof.tq", + "src/builtins/string-iswellformed.tq", "src/builtins/string-iterator.tq", "src/builtins/string-match-search.tq", "src/builtins/string-pad.tq", @@ -870,6 +871,7 @@ filegroup( "src/builtins/string-startswith.tq", "src/builtins/string-substr.tq", "src/builtins/string-substring.tq", + "src/builtins/string-towellformed.tq", "src/builtins/string-trim.tq", "src/builtins/symbol.tq", "src/builtins/torque-internal.tq", diff --git a/BUILD.gn b/BUILD.gn index 
682dfddc4a..26adeb7be9 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -1801,6 +1801,7 @@ torque_files = [ "src/builtins/string-html.tq", "src/builtins/string-includes.tq", "src/builtins/string-indexof.tq", + "src/builtins/string-iswellformed.tq", "src/builtins/string-iterator.tq", "src/builtins/string-match-search.tq", "src/builtins/string-pad.tq", @@ -1810,6 +1811,7 @@ torque_files = [ "src/builtins/string-startswith.tq", "src/builtins/string-substr.tq", "src/builtins/string-substring.tq", + "src/builtins/string-towellformed.tq", "src/builtins/string-trim.tq", "src/builtins/symbol.tq", "src/builtins/torque-internal.tq", diff --git a/src/builtins/builtins-string-gen.cc b/src/builtins/builtins-string-gen.cc index e5937e5f3d..42fed47456 100644 --- a/src/builtins/builtins-string-gen.cc +++ b/src/builtins/builtins-string-gen.cc @@ -1492,6 +1492,61 @@ TNode StringBuiltinsAssembler::LoadSurrogatePairAt( return var_result.value(); } +TNode StringBuiltinsAssembler::HasUnpairedSurrogate(TNode string, + Label* if_indirect) { + TNode instance_type = LoadInstanceType(string); + CSA_DCHECK(this, Word32Equal(Word32And(instance_type, + Int32Constant(kStringEncodingMask)), + Int32Constant(kTwoByteStringTag))); + GotoIfNot(Word32Equal(Word32And(instance_type, + Int32Constant(kIsIndirectStringMask | + kUncachedExternalStringMask)), + Int32Constant(0)), + if_indirect); + + TNode string_data = DirectStringData(string, instance_type); + TNode length = LoadStringLengthAsWord(string); + + const TNode has_unpaired_surrogate = + ExternalConstant(ExternalReference::has_unpaired_surrogate()); + return UncheckedCast( + CallCFunction(has_unpaired_surrogate, MachineType::Uint32(), + std::make_pair(MachineType::Pointer(), string_data), + std::make_pair(MachineType::IntPtr(), length))); +} + +void StringBuiltinsAssembler::ReplaceUnpairedSurrogates(TNode source, + TNode dest, + Label* if_indirect) { + TNode source_instance_type = LoadInstanceType(source); + CSA_DCHECK(this, 
Word32Equal(Word32And(source_instance_type, + Int32Constant(kStringEncodingMask)), + Int32Constant(kTwoByteStringTag))); + GotoIfNot(Word32Equal(Word32And(source_instance_type, + Int32Constant(kIsIndirectStringMask | + kUncachedExternalStringMask)), + Int32Constant(0)), + if_indirect); + + TNode source_data = DirectStringData(source, source_instance_type); + // The destination string is a freshly allocated SeqString, and so is always + // direct. + TNode dest_instance_type = LoadInstanceType(dest); + CSA_DCHECK(this, Word32Equal(Word32And(dest_instance_type, + Int32Constant(kStringEncodingMask)), + Int32Constant(kTwoByteStringTag))); + TNode dest_data = DirectStringData(dest, dest_instance_type); + TNode length = LoadStringLengthAsWord(source); + CSA_DCHECK(this, IntPtrEqual(length, LoadStringLengthAsWord(dest))); + + const TNode replace_unpaired_surrogates = + ExternalConstant(ExternalReference::replace_unpaired_surrogates()); + CallCFunction(replace_unpaired_surrogates, MachineType::Pointer(), + std::make_pair(MachineType::Pointer(), source_data), + std::make_pair(MachineType::Pointer(), dest_data), + std::make_pair(MachineType::IntPtr(), length)); +} + void StringBuiltinsAssembler::BranchIfStringPrimitiveWithNoCustomIteration( TNode object, TNode context, Label* if_true, Label* if_false) { diff --git a/src/builtins/builtins-string-gen.h b/src/builtins/builtins-string-gen.h index bd1390dc24..55647b9096 100644 --- a/src/builtins/builtins-string-gen.h +++ b/src/builtins/builtins-string-gen.h @@ -33,6 +33,10 @@ class StringBuiltinsAssembler : public CodeStubAssembler { TNode LoadSurrogatePairAt(TNode string, TNode length, TNode index, UnicodeEncoding encoding); + TNode HasUnpairedSurrogate(TNode string, Label* if_indirect); + + void ReplaceUnpairedSurrogates(TNode source, TNode dest, + Label* if_indirect); TNode StringFromSingleUTF16EncodedCodePoint(TNode codepoint); diff --git a/src/builtins/string-iswellformed.tq b/src/builtins/string-iswellformed.tq new file mode 
100644 index 0000000000..02c8a916c9 --- /dev/null +++ b/src/builtins/string-iswellformed.tq @@ -0,0 +1,45 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include 'src/builtins/builtins-string-gen.h' + +namespace runtime { +extern runtime StringIsWellFormed(Context, String): Boolean; +} + +namespace string { + +extern macro StringBuiltinsAssembler::HasUnpairedSurrogate(String): + bool labels Indirect; + +transitioning javascript builtin +StringPrototypeIsWellFormed( + js-implicit context: NativeContext, + receiver: JSAny)(...arguments): Boolean { + const methodName: constexpr string = 'String.prototype.isWellFormed'; + + // 1. Let O be ? RequireObjectCoercible(this value). + // 2. Let S be ? ToString(O). + const s = ToThisString(receiver, methodName); + + // 3. Return IsStringWellFormedUnicode(S). + + // Fast path: one-byte strings cannot have unpaired surrogates and are + // definitionally well-formed. + if (s.StringInstanceType().is_one_byte) return True; + + // Slow path: flatten the string and look for unpaired surrogates. + // + // TODO(v8:13557): The two-byte case can be optimized by extending the + // InstanceType. See + // https://docs.google.com/document/d/15f-1c_Ysw3lvjy_Gx0SmmD9qeO8UuXuAbWIpWCnTDO8/ + const flat = Flatten(s); + try { + const illFormed = HasUnpairedSurrogate(flat) otherwise Indirect; + return illFormed ? False : True; + } label Indirect deferred { + return runtime::StringIsWellFormed(context, flat); + } +} +} diff --git a/src/builtins/string-towellformed.tq b/src/builtins/string-towellformed.tq new file mode 100644 index 0000000000..b900862f77 --- /dev/null +++ b/src/builtins/string-towellformed.tq @@ -0,0 +1,59 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include 'src/builtins/builtins-string-gen.h' + +namespace runtime { +extern runtime StringToWellFormed(Context, String): String; +} + +namespace string { + +extern macro StringBuiltinsAssembler::ReplaceUnpairedSurrogates( + String, String): void labels Indirect; + +transitioning javascript builtin +StringPrototypeToWellFormed( + js-implicit context: NativeContext, receiver: JSAny)(...arguments): String { + const methodName: constexpr string = 'String.prototype.toWellFormed'; + + // 1. Let O be ? RequireObjectCoercible(this value). + // 2. Let S be ? ToString(O). + const s = ToThisString(receiver, methodName); + + // Fast path: one-byte strings cannot have unpaired surrogates and are + // definitionally well-formed. + if (s.StringInstanceType().is_one_byte) return s; + + // 3. Let strLen be the length of S. + const strLen = s.length_uint32; + + // 4. Let k be 0. + // 5. Let result be the empty String. + const flat = Flatten(s); + let result = flat; + + // 6. Repeat, while k < strLen, + // a. Let cp be CodePointAt(S, k). + // b. If cp.[[IsUnpairedSurrogate]] is true, then + // i. Set result to the string-concatenation of result and + // 0xFFFD (REPLACEMENT CHARACTER). + // c. Else, + // i. Set result to the string-concatenation of result and + // UTF16EncodeCodePoint(cp.[[CodePoint]]). + // d. Set k to k + cp.[[CodeUnitCount]]. + try { + const illFormed = HasUnpairedSurrogate(flat) otherwise Indirect; + if (illFormed) { + result = AllocateSeqTwoByteString(strLen); + ReplaceUnpairedSurrogates(flat, result) otherwise Indirect; + } + + // 7. Return result. 
+ return result; + } label Indirect deferred { + return runtime::StringToWellFormed(context, flat); + } +} +} diff --git a/src/codegen/external-reference.cc b/src/codegen/external-reference.cc index 820cae92ca..1f733a9d4f 100644 --- a/src/codegen/external-reference.cc +++ b/src/codegen/external-reference.cc @@ -36,6 +36,7 @@ #include "src/regexp/regexp-macro-assembler-arch.h" #include "src/regexp/regexp-stack.h" #include "src/strings/string-search.h" +#include "src/strings/unicode-inl.h" #if V8_ENABLE_WEBASSEMBLY #include "src/wasm/wasm-external-refs.h" @@ -1153,6 +1154,24 @@ static Address LexicographicCompareWrapper(Isolate* isolate, Address smi_x, FUNCTION_REFERENCE(smi_lexicographic_compare_function, LexicographicCompareWrapper) +uint32_t HasUnpairedSurrogate(const uint16_t* code_units, size_t length) { + // Use uint32_t to avoid complexity around bool return types. + static constexpr uint32_t kTrue = 1; + static constexpr uint32_t kFalse = 0; + return unibrow::Utf16::HasUnpairedSurrogate(code_units, length) ? 
kTrue + : kFalse; +} + +FUNCTION_REFERENCE(has_unpaired_surrogate, HasUnpairedSurrogate) + +void ReplaceUnpairedSurrogates(const uint16_t* source_code_units, + uint16_t* dest_code_units, size_t length) { + return unibrow::Utf16::ReplaceUnpairedSurrogates(source_code_units, + dest_code_units, length); +} + +FUNCTION_REFERENCE(replace_unpaired_surrogates, ReplaceUnpairedSurrogates) + FUNCTION_REFERENCE(mutable_big_int_absolute_add_and_canonicalize_function, MutableBigInt_AbsoluteAddAndCanonicalize) diff --git a/src/codegen/external-reference.h b/src/codegen/external-reference.h index 6ca4097a1b..45131780ec 100644 --- a/src/codegen/external-reference.h +++ b/src/codegen/external-reference.h @@ -231,6 +231,8 @@ class StatsCounter; V(array_indexof_includes_smi_or_object, \ "array_indexof_includes_smi_or_object") \ V(array_indexof_includes_double, "array_indexof_includes_double") \ + V(has_unpaired_surrogate, "Utf16::HasUnpairedSurrogate") \ + V(replace_unpaired_surrogates, "Utf16::ReplaceUnpairedSurrogates") \ V(try_string_to_index_or_lookup_existing, \ "try_string_to_index_or_lookup_existing") \ V(string_from_forward_table, "string_from_forward_table") \ diff --git a/src/flags/flag-definitions.h b/src/flags/flag-definitions.h index a74006935b..4e3bc2be69 100644 --- a/src/flags/flag-definitions.h +++ b/src/flags/flag-definitions.h @@ -235,7 +235,8 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features") V(harmony_rab_gsab, \ "harmony ResizableArrayBuffer / GrowableSharedArrayBuffer") \ V(harmony_rab_gsab_transfer, "harmony ArrayBuffer.transfer") \ - V(harmony_array_grouping, "harmony array grouping") + V(harmony_array_grouping, "harmony array grouping") \ + V(harmony_string_is_well_formed, "harmony String#{is,to}WellFormed") DEFINE_IMPLICATION(harmony_rab_gsab_transfer, harmony_rab_gsab) diff --git a/src/init/bootstrapper.cc b/src/init/bootstrapper.cc index 3f5050e824..e0dca29f9e 100644 --- a/src/init/bootstrapper.cc +++ b/src/init/bootstrapper.cc 
@@ -4892,6 +4892,18 @@ void Genesis::InitializeGlobal_harmony_rab_gsab() { Builtin::kSharedArrayBufferPrototypeGrow, 1, true); } +void Genesis::InitializeGlobal_harmony_string_is_well_formed() { + if (!v8_flags.harmony_string_is_well_formed) return; + Handle string_function(native_context()->string_function(), + isolate()); + Handle string_prototype( + JSObject::cast(string_function->initial_map().prototype()), isolate()); + SimpleInstallFunction(isolate(), string_prototype, "isWellFormed", + Builtin::kStringPrototypeIsWellFormed, 0, false); + SimpleInstallFunction(isolate(), string_prototype, "toWellFormed", + Builtin::kStringPrototypeToWellFormed, 0, false); +} + void Genesis::InitializeGlobal_harmony_temporal() { if (!v8_flags.harmony_temporal) return; // -- T e m p o r a l diff --git a/src/objects/string-inl.h b/src/objects/string-inl.h index 87fd2c5505..efbdd12e00 100644 --- a/src/objects/string-inl.h +++ b/src/objects/string-inl.h @@ -18,6 +18,7 @@ #include "src/sandbox/external-pointer-inl.h" #include "src/sandbox/external-pointer.h" #include "src/strings/string-hasher-inl.h" +#include "src/strings/unicode-inl.h" #include "src/utils/utils.h" // Has to be the last include (doesn't have include guards): @@ -956,6 +957,21 @@ ConsString String::VisitFlat( } } +bool String::IsWellFormedUnicode(Isolate* isolate, Handle string) { + // One-byte strings are definitionally well formed and cannot have unpaired + // surrogates. + if (string->IsOneByteRepresentation()) return true; + + // TODO(v8:13557): The two-byte case can be optimized by extending the + // InstanceType. 
See + // https://docs.google.com/document/d/15f-1c_Ysw3lvjy_Gx0SmmD9qeO8UuXuAbWIpWCnTDO8/ + string = Flatten(isolate, string); + DCHECK(string->IsTwoByteRepresentation()); + DisallowGarbageCollection no_gc; + const uint16_t* data = string->template GetChars(isolate, no_gc); + return !unibrow::Utf16::HasUnpairedSurrogate(data, string->length()); +} + template <> inline base::Vector String::GetCharVector( const DisallowGarbageCollection& no_gc) { diff --git a/src/objects/string.h b/src/objects/string.h index bf1730314e..41eb67f3b1 100644 --- a/src/objects/string.h +++ b/src/objects/string.h @@ -526,6 +526,10 @@ class String : public TorqueGeneratedString { PtrComprCageBase cage_base, const SharedStringAccessGuardIfNeeded&); + // Returns true if this string has no unpaired surrogates and false otherwise. + static inline bool IsWellFormedUnicode(Isolate* isolate, + Handle string); + static inline bool IsAscii(const char* chars, int length) { return IsAscii(reinterpret_cast(chars), length); } diff --git a/src/runtime/runtime-strings.cc b/src/runtime/runtime-strings.cc index 1f2da1cd19..189cc4c28c 100644 --- a/src/runtime/runtime-strings.cc +++ b/src/runtime/runtime-strings.cc @@ -10,6 +10,7 @@ #include "src/objects/slots.h" #include "src/objects/smi.h" #include "src/strings/string-builder-inl.h" +#include "src/strings/unicode-inl.h" #if V8_ENABLE_WEBASSEMBLY // TODO(chromium:1236668): Drop this when the "SaveAndClearThreadInWasmFlag" @@ -471,5 +472,30 @@ RUNTIME_FUNCTION(Runtime_StringEscapeQuotes) { return *builder.ToString().ToHandleChecked(); } +RUNTIME_FUNCTION(Runtime_StringIsWellFormed) { + HandleScope handle_scope(isolate); + DCHECK_EQ(1, args.length()); + Handle string = args.at(0); + return isolate->heap()->ToBoolean( + String::IsWellFormedUnicode(isolate, string)); +} + +RUNTIME_FUNCTION(Runtime_StringToWellFormed) { + HandleScope handle_scope(isolate); + DCHECK_EQ(1, args.length()); + Handle source = args.at(0); + if (String::IsWellFormedUnicode(isolate, 
source)) return *source; + source = String::Flatten(isolate, source); + const int length = source->length(); + Handle dest = + isolate->factory()->NewRawTwoByteString(length).ToHandleChecked(); + DisallowGarbageCollection no_gc; + const uint16_t* source_data = + source->template GetChars(isolate, no_gc); + uint16_t* dest_data = dest->GetChars(no_gc); + unibrow::Utf16::ReplaceUnpairedSurrogates(source_data, dest_data, length); + return *dest; +} + } // namespace internal } // namespace v8 diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h index f250123008..ecf50b7c66 100644 --- a/src/runtime/runtime.h +++ b/src/runtime/runtime.h @@ -462,13 +462,15 @@ namespace internal { F(StringEscapeQuotes, 1, 1) \ F(StringGreaterThan, 2, 1) \ F(StringGreaterThanOrEqual, 2, 1) \ + F(StringIsWellFormed, 1, 1) \ F(StringLastIndexOf, 2, 1) \ F(StringLessThan, 2, 1) \ F(StringLessThanOrEqual, 2, 1) \ F(StringMaxLength, 0, 1) \ F(StringReplaceOneCharWithString, 3, 1) \ F(StringSubstring, 3, 1) \ - F(StringToArray, 2, 1) + F(StringToArray, 2, 1) \ + F(StringToWellFormed, 1, 1) #define FOR_EACH_INTRINSIC_SYMBOL(F, I) \ F(CreatePrivateNameSymbol, 1, 1) \ diff --git a/src/strings/unicode.cc b/src/strings/unicode.cc index 0a9b3bbb3d..d98d946f16 100644 --- a/src/strings/unicode.cc +++ b/src/strings/unicode.cc @@ -239,6 +239,37 @@ bool Utf8::ValidateEncoding(const byte* bytes, size_t length) { return state == State::kAccept; } +// static +void Utf16::ReplaceUnpairedSurrogates(const uint16_t* source_code_units, + uint16_t* dest_code_units, + size_t length) { + // U+FFFD (REPLACEMENT CHARACTER) + constexpr uint16_t kReplacement = 0xFFFD; + + for (size_t i = 0; i < length; i++) { + const uint16_t source_code_unit = source_code_units[i]; + const size_t copy_index = i; + uint16_t dest_code_unit = source_code_unit; + if (IsLeadSurrogate(source_code_unit)) { + // The current code unit is a leading surrogate. 
If it's not followed by a + // trailing surrogate, replace it with the replacement character. + if (i == length - 1 || !IsTrailSurrogate(source_code_units[i + 1])) { + dest_code_unit = kReplacement; + } else { + // Copy the paired trailing surrogate. The paired leading surrogate will + // be copied below. + ++i; + dest_code_units[i] = source_code_units[i]; + } + } else if (IsTrailSurrogate(source_code_unit)) { + // All paired trailing surrogates are skipped above, so this branch is + // only for those that are unpaired. + dest_code_unit = kReplacement; + } + dest_code_units[copy_index] = dest_code_unit; + } +} + #if V8_ENABLE_WEBASSEMBLY bool Wtf8::ValidateEncoding(const byte* bytes, size_t length) { using State = GeneralizedUtf8DfaDecoder::State; diff --git a/src/strings/unicode.h b/src/strings/unicode.h index 4b73bd4c3c..0145013b17 100644 --- a/src/strings/unicode.h +++ b/src/strings/unicode.h @@ -121,7 +121,7 @@ class Utf16 { // 4 bytes and the 3 bytes that were used to encode the lead surrogate // can be reclaimed. static const int kMaxExtraUtf8BytesForOneUtf16CodeUnit = 3; - // One UTF-16 surrogate is endoded (illegally) as 3 UTF-8 bytes. + // One UTF-16 surrogate is encoded (illegally) as 3 UTF-8 bytes. // The illegality stems from the surrogate not being part of a pair. static const int kUtf8BytesToCodeASurrogate = 3; static inline uint16_t LeadSurrogate(uint32_t char_code) { @@ -132,6 +132,10 @@ class Utf16 { } static inline bool HasUnpairedSurrogate(const uint16_t* code_units, size_t length); + + static void ReplaceUnpairedSurrogates(const uint16_t* source_code_units, + uint16_t* dest_code_units, + size_t length); }; class Latin1 { diff --git a/test/mjsunit/harmony/string-iswellformed-external-uncached.js b/test/mjsunit/harmony/string-iswellformed-external-uncached.js new file mode 100644 index 0000000000..193294e394 --- /dev/null +++ b/test/mjsunit/harmony/string-iswellformed-external-uncached.js @@ -0,0 +1,42 @@ +// Copyright 2022 the V8 project authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --expose-externalize-string --harmony-string-is-well-formed + +(function TestIsWellFormed() { + const short2ByteWellFormed = '\u1234'; + const short2ByteIllFormed = '\uD83D'; + + assertTrue(short2ByteWellFormed.isWellFormed()); + assertFalse(short2ByteIllFormed.isWellFormed()); + + try { + // Turn the strings into uncached external strings to hit the slow runtime + // path. + externalizeString(short2ByteWellFormed, true); + externalizeString(short2ByteIllFormed, true); + } catch (e) {} + + assertTrue(short2ByteWellFormed.isWellFormed()); + assertFalse(short2ByteIllFormed.isWellFormed()); +})(); + +(function TestToWellFormed() { + const short2ByteWellFormed = '\u1234'; + const short2ByteIllFormed = '\uD83D'; + + assertTrue(short2ByteWellFormed.isWellFormed()); + assertFalse(short2ByteIllFormed.isWellFormed()); + + try { + // Turn the strings into uncached external strings to hit the slow runtime + // path. 
+ externalizeString(short2ByteWellFormed, true); + externalizeString(short2ByteIllFormed, true); + } catch (e) {} + + assertEquals('\u1234', short2ByteWellFormed.toWellFormed()); + // U+FFFD (REPLACEMENT CHARACTER) + assertEquals('\uFFFD', short2ByteIllFormed.toWellFormed()); +})(); diff --git a/test/test262/test262.status b/test/test262/test262.status index 450e157d4f..ec65e9b554 100644 --- a/test/test262/test262.status +++ b/test/test262/test262.status @@ -994,20 +994,6 @@ 'language/statements/class/decorator/syntax/valid/class-element-decorator-member-expr-identifier-reference': [FAIL], 'language/statements/class/decorator/syntax/valid/class-element-decorator-parenthesized-expr-identifier-reference': [FAIL], - # https://bugs.chromium.org/p/v8/issues/detail?id=13557 - 'built-ins/String/prototype/isWellFormed/length': [FAIL], - 'built-ins/String/prototype/isWellFormed/name': [FAIL], - 'built-ins/String/prototype/isWellFormed/prop-desc': [FAIL], - 'built-ins/String/prototype/isWellFormed/return-abrupt-from-this': [FAIL], - 'built-ins/String/prototype/isWellFormed/returns-boolean': [FAIL], - 'built-ins/String/prototype/isWellFormed/to-string': [FAIL], - 'built-ins/String/prototype/toWellFormed/length': [FAIL], - 'built-ins/String/prototype/toWellFormed/name': [FAIL], - 'built-ins/String/prototype/toWellFormed/prop-desc': [FAIL], - 'built-ins/String/prototype/toWellFormed/return-abrupt-from-this': [FAIL], - 'built-ins/String/prototype/toWellFormed/returns-well-formed-string': [FAIL], - 'built-ins/String/prototype/toWellFormed/to-string': [FAIL], - # https://bugs.chromium.org/p/v8/issues/detail?id=11660 # https://github.com/tc39/proposal-intl-duration-format/issues/114 'intl402/DurationFormat/prototype/format/style-options-en': [FAIL], diff --git a/test/test262/testcfg.py b/test/test262/testcfg.py index 2baa325253..44d034e61f 100644 --- a/test/test262/testcfg.py +++ b/test/test262/testcfg.py @@ -56,6 +56,8 @@ FEATURE_FLAGS = { 'array-grouping': 
'--harmony-array-grouping', 'change-array-by-copy': '--harmony-change-array-by-copy', 'symbols-as-weakmap-keys': '--harmony-symbol-as-weakmap-key', + 'String.prototype.isWellFormed': '--harmony-string-is-well-formed', + 'String.prototype.toWellFormed': '--harmony-string-is-well-formed', } SKIPPED_FEATURES = set([]) diff --git a/tools/v8heapconst.py b/tools/v8heapconst.py index 19e3330fd5..ee486d1cc5 100644 --- a/tools/v8heapconst.py +++ b/tools/v8heapconst.py @@ -577,30 +577,30 @@ KNOWN_OBJECTS = { ("old_space", 0x04581): "StringSplitCache", ("old_space", 0x04989): "RegExpMultipleCache", ("old_space", 0x04d91): "BuiltinsConstantsTable", - ("old_space", 0x05359): "AsyncFunctionAwaitRejectSharedFun", - ("old_space", 0x0537d): "AsyncFunctionAwaitResolveSharedFun", - ("old_space", 0x053a1): "AsyncGeneratorAwaitRejectSharedFun", - ("old_space", 0x053c5): "AsyncGeneratorAwaitResolveSharedFun", - ("old_space", 0x053e9): "AsyncGeneratorYieldWithAwaitResolveSharedFun", - ("old_space", 0x0540d): "AsyncGeneratorReturnResolveSharedFun", - ("old_space", 0x05431): "AsyncGeneratorReturnClosedRejectSharedFun", - ("old_space", 0x05455): "AsyncGeneratorReturnClosedResolveSharedFun", - ("old_space", 0x05479): "AsyncIteratorValueUnwrapSharedFun", - ("old_space", 0x0549d): "PromiseAllResolveElementSharedFun", - ("old_space", 0x054c1): "PromiseAllSettledResolveElementSharedFun", - ("old_space", 0x054e5): "PromiseAllSettledRejectElementSharedFun", - ("old_space", 0x05509): "PromiseAnyRejectElementSharedFun", - ("old_space", 0x0552d): "PromiseCapabilityDefaultRejectSharedFun", - ("old_space", 0x05551): "PromiseCapabilityDefaultResolveSharedFun", - ("old_space", 0x05575): "PromiseCatchFinallySharedFun", - ("old_space", 0x05599): "PromiseGetCapabilitiesExecutorSharedFun", - ("old_space", 0x055bd): "PromiseThenFinallySharedFun", - ("old_space", 0x055e1): "PromiseThrowerFinallySharedFun", - ("old_space", 0x05605): "PromiseValueThunkFinallySharedFun", - ("old_space", 0x05629): 
"ProxyRevokeSharedFun", - ("old_space", 0x0564d): "ShadowRealmImportValueFulfilledSFI", - ("old_space", 0x05671): "SourceTextModuleExecuteAsyncModuleFulfilledSFI", - ("old_space", 0x05695): "SourceTextModuleExecuteAsyncModuleRejectedSFI", + ("old_space", 0x05361): "AsyncFunctionAwaitRejectSharedFun", + ("old_space", 0x05385): "AsyncFunctionAwaitResolveSharedFun", + ("old_space", 0x053a9): "AsyncGeneratorAwaitRejectSharedFun", + ("old_space", 0x053cd): "AsyncGeneratorAwaitResolveSharedFun", + ("old_space", 0x053f1): "AsyncGeneratorYieldWithAwaitResolveSharedFun", + ("old_space", 0x05415): "AsyncGeneratorReturnResolveSharedFun", + ("old_space", 0x05439): "AsyncGeneratorReturnClosedRejectSharedFun", + ("old_space", 0x0545d): "AsyncGeneratorReturnClosedResolveSharedFun", + ("old_space", 0x05481): "AsyncIteratorValueUnwrapSharedFun", + ("old_space", 0x054a5): "PromiseAllResolveElementSharedFun", + ("old_space", 0x054c9): "PromiseAllSettledResolveElementSharedFun", + ("old_space", 0x054ed): "PromiseAllSettledRejectElementSharedFun", + ("old_space", 0x05511): "PromiseAnyRejectElementSharedFun", + ("old_space", 0x05535): "PromiseCapabilityDefaultRejectSharedFun", + ("old_space", 0x05559): "PromiseCapabilityDefaultResolveSharedFun", + ("old_space", 0x0557d): "PromiseCatchFinallySharedFun", + ("old_space", 0x055a1): "PromiseGetCapabilitiesExecutorSharedFun", + ("old_space", 0x055c5): "PromiseThenFinallySharedFun", + ("old_space", 0x055e9): "PromiseThrowerFinallySharedFun", + ("old_space", 0x0560d): "PromiseValueThunkFinallySharedFun", + ("old_space", 0x05631): "ProxyRevokeSharedFun", + ("old_space", 0x05655): "ShadowRealmImportValueFulfilledSFI", + ("old_space", 0x05679): "SourceTextModuleExecuteAsyncModuleFulfilledSFI", + ("old_space", 0x0569d): "SourceTextModuleExecuteAsyncModuleRejectedSFI", } # Lower 32 bits of first page addresses for various heap spaces. 
From b69c3770b2c5c734d4024bd5cce9cc157855ee2c Mon Sep 17 00:00:00 2001 From: Shu-yu Guo Date: Thu, 22 Dec 2022 10:29:52 -0800 Subject: [PATCH 052/654] [ShadowRealm] Make CallSite#getFunction/getThis throw for ShadowRealm frames This CL plugs the hole in the outside<-ShadowRealm direction (i.e. getting a reference to an object inside of the ShadowRealm from outside the ShadowRealm). This is a follow-on CL to https://chromium-review.googlesource.com/c/v8/v8/+/4108810, which plugged the getFunction hole in the ShadowRealm<-outside direction (i.e. getting a reference to an object outside of the ShadowRealm from inside the ShadowRealm). Bug: v8:1198 Change-Id: Ic06533ba8b1cc6477ef9d55a23cb8b0b6584d4a6 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4115657 Commit-Queue: Shu-yu Guo Reviewed-by: Camillo Bruni Auto-Submit: Shu-yu Guo Cr-Commit-Position: refs/heads/main@{#85003} --- src/builtins/builtins-callsite.cc | 23 ++++- .../harmony/shadowrealm-callsite-throw.js | 91 ++++++++++++++++--- 2 files changed, 97 insertions(+), 17 deletions(-) diff --git a/src/builtins/builtins-callsite.cc b/src/builtins/builtins-callsite.cc index 6afe84dfd5..60f347f643 100644 --- a/src/builtins/builtins-callsite.cc +++ b/src/builtins/builtins-callsite.cc @@ -24,6 +24,7 @@ namespace internal { isolate->factory()->NewStringFromAsciiChecked(method))); \ } \ Handle frame = Handle::cast(it.GetDataValue()) + namespace { Object PositiveNumberOrNull(int value, Isolate* isolate) { @@ -31,6 +32,10 @@ Object PositiveNumberOrNull(int value, Isolate* isolate) { return ReadOnlyRoots(isolate).null_value(); } +bool NativeContextIsForShadowRealm(NativeContext native_context) { + return native_context.scope_info().scope_type() == SHADOW_REALM_SCOPE; +} + } // namespace BUILTIN(CallSitePrototypeGetColumnNumber) { @@ -69,8 +74,13 @@ BUILTIN(CallSitePrototypeGetFunction) { static const char method_name[] = "getFunction"; HandleScope scope(isolate); CHECK_CALLSITE(frame, method_name); - if 
(isolate->raw_native_context().scope_info().scope_type() == - SHADOW_REALM_SCOPE) { + // ShadowRealms have a boundary: references to outside objects must not exist + // in the ShadowRealm, and references to ShadowRealm objects must not exist + // outside the ShadowRealm. + if (NativeContextIsForShadowRealm(isolate->raw_native_context()) || + (frame->function().IsJSFunction() && + NativeContextIsForShadowRealm( + JSFunction::cast(frame->function()).native_context()))) { THROW_NEW_ERROR_RETURN_FAILURE( isolate, NewTypeError( @@ -136,8 +146,13 @@ BUILTIN(CallSitePrototypeGetThis) { static const char method_name[] = "getThis"; HandleScope scope(isolate); CHECK_CALLSITE(frame, method_name); - if (isolate->raw_native_context().scope_info().scope_type() == - SHADOW_REALM_SCOPE) { + // ShadowRealms have a boundary: references to outside objects must not exist + // in the ShadowRealm, and references to ShadowRealm objects must not exist + // outside the ShadowRealm. + if (NativeContextIsForShadowRealm(isolate->raw_native_context()) || + (frame->function().IsJSFunction() && + NativeContextIsForShadowRealm( + JSFunction::cast(frame->function()).native_context()))) { THROW_NEW_ERROR_RETURN_FAILURE( isolate, NewTypeError( diff --git a/test/mjsunit/harmony/shadowrealm-callsite-throw.js b/test/mjsunit/harmony/shadowrealm-callsite-throw.js index 0739db4811..75a09da23f 100644 --- a/test/mjsunit/harmony/shadowrealm-callsite-throw.js +++ b/test/mjsunit/harmony/shadowrealm-callsite-throw.js @@ -4,14 +4,15 @@ // Flags: --harmony-shadow-realm -// Test that CallSite#getFunction and CallSite#getThis throw inside -// ShadowRealms, as otherwise we could violate the callable boundary invariant. +// Test that CallSite#getFunction and CallSite#getThis throw inside ShadowRealms +// and cannot access objects from the outside, as otherwise we could violate the +// callable boundary invariant. 
+(function testInside() { + const shadowRealm = new ShadowRealm(); -const shadowRealm = new ShadowRealm(); - -// The ShadowRealm won't have assertThrows, so use try-catch and accumulate a -// message string. -const wrapped = shadowRealm.evaluate(` + // The ShadowRealm won't have assertThrows, so use try-catch and accumulate a + // message string. + const wrapped = shadowRealm.evaluate(` Error.prepareStackTrace = function(err, frames) { let a = []; for (let i = 0; i < frames.length; i++) { @@ -40,10 +41,74 @@ function inner() { inner; `); -(function outer() { - // There are 3 frames: top-level, outer, inner, so getFunction/getThis should - // throw 3 times. - assertEquals("getFunction threw getThis threw " + - "getFunction threw getThis threw " + - "getFunction threw getThis threw", wrapped()); + (function outer() { + // There are 4 frames, youngest to oldest: + // + // inner + // outer + // testInside + // top-level + // + // So getFunction/getThis should throw 4 times since the prepareStackTrace + // hook is executing inside the ShadowRealm. + assertEquals("getFunction threw getThis threw " + + "getFunction threw getThis threw " + + "getFunction threw getThis threw " + + "getFunction threw getThis threw", wrapped()); + })(); +})(); + +// Test that CallSite#getFunction and CallSite#getThis throw for ShadowRealm +// objects from the outside, as otherwise we can also violate the callable +// boundary. 
+(function testOutside() { + Error.prepareStackTrace = function(err, frames) { + let a = []; + for (let i = 0; i < frames.length; i++) { + try { + frames[i].getFunction(); + a.push(`functionName: ${frames[i].getFunctionName()}`); + } catch (e) { + a.push(`${frames[i].getFunctionName()} threw`); + } + try { + frames[i].getThis(); + a.push("t"); + } catch (e) { + a.push("getThis threw"); + } + } + return JSON.stringify(a); + }; + const shadowRealm = new ShadowRealm(); + const wrap = shadowRealm.evaluate(` +function trampolineMaker(callback) { + return function trampoline() { return callback(); }; +} +trampolineMaker; +`); + const wrapped = wrap(function callback() { + try { + throw new Error(); + } catch (e) { + return e.stack; + } + }); + + + // There are 4 frames, youngest to oldest: + // + // callback (in outer realm) + // trampoline (in ShadowRealm) + // testOutside (in outer realm) + // top-level (in outer realm) + // + // The frame corresponding to trampoline should throw, since the outer realm + // should not get references to ShadowRealm objects. + assertEquals(JSON.stringify( + ["functionName: callback", "t", + "trampoline threw", "getThis threw", + "functionName: testOutside", "t", + "functionName: null", "t"]), wrapped()); + assertEquals })(); From 31751172938717b1351292b174b282541daeb20c Mon Sep 17 00:00:00 2001 From: Choongwoo Han Date: Thu, 22 Dec 2022 10:35:36 -0800 Subject: [PATCH 053/654] [wasm] Recycling locals array when building a graph. Wasm graph builder duplicates a vector of local variables for each block or branch, but it never frees them. So, if a single function has a lot of local variables allocated, the graph builder bloats the memory. This CL actively frees the locals vector and reuses them within the zone. It's easy to reuse them since the locals vector always have the same size. This saves 95% of memory for the reported wasm binary. 
Bug: v8:13543 Change-Id: Ibf5e846b405adc24101ed79dd34d08160ceda2fb Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4112638 Reviewed-by: Clemens Backes Commit-Queue: Choongwoo Han Cr-Commit-Position: refs/heads/main@{#85004} --- src/wasm/graph-builder-interface.cc | 179 ++++++++++++++++++++-------- 1 file changed, 130 insertions(+), 49 deletions(-) diff --git a/src/wasm/graph-builder-interface.cc b/src/wasm/graph-builder-interface.cc index 671cad7789..c6b05ad035 100644 --- a/src/wasm/graph-builder-interface.cc +++ b/src/wasm/graph-builder-interface.cc @@ -4,6 +4,7 @@ #include "src/wasm/graph-builder-interface.h" +#include "src/base/vector.h" #include "src/compiler/wasm-compiler-definitions.h" #include "src/compiler/wasm-compiler.h" #include "src/flags/flags.h" @@ -25,11 +26,54 @@ namespace { // Expose {compiler::Node} opaquely as {wasm::TFNode}. using TFNode = compiler::Node; +using LocalsAllocator = RecyclingZoneAllocator; + +class LocalsVector { + public: + LocalsVector(LocalsAllocator* allocator, size_t size) + : allocator_(allocator), data_(allocator->allocate(size), size) { + std::fill(data_.begin(), data_.end(), nullptr); + } + LocalsVector(const LocalsVector& other) V8_NOEXCEPT + : allocator_(other.allocator_), + data_(allocator_->allocate(other.size()), other.size()) { + data_.OverwriteWith(other.data_); + } + LocalsVector(LocalsVector&& other) V8_NOEXCEPT + : allocator_(other.allocator_), + data_(other.data_.begin(), other.size()) { + other.data_.Truncate(0); + } + ~LocalsVector() { Clear(); } + + LocalsVector& operator=(const LocalsVector& other) V8_NOEXCEPT { + allocator_ = other.allocator_; + if (!data_.size()) { + data_ = base::Vector(allocator_->allocate(other.size()), + other.size()); + } + data_.OverwriteWith(other.data_); + return *this; + } + TFNode*& operator[](size_t index) { return data_[index]; } + size_t size() const { return data_.size(); } + + void Clear() { + if (size()) allocator_->deallocate(data_.begin(), size()); + 
data_.Truncate(0); + } + + private: + LocalsAllocator* allocator_ = nullptr; + base::Vector data_; +}; // An SsaEnv environment carries the current local variable renaming // as well as the current effect and control dependency in the TF graph. // It maintains a control state that tracks whether the environment // is reachable, has reached a control end, or has been merged. +// It's encouraged to manage lifetime of SsaEnv by `ScopedSsaEnv` or +// `Control` (`block_env`, `false_env`, or `try_info->catch_env`). struct SsaEnv : public ZoneObject { enum State { kUnreachable, kReached, kMerged }; @@ -37,14 +81,14 @@ struct SsaEnv : public ZoneObject { TFNode* control; TFNode* effect; compiler::WasmInstanceCacheNodes instance_cache; - ZoneVector locals; + LocalsVector locals; - SsaEnv(Zone* zone, State state, TFNode* control, TFNode* effect, + SsaEnv(LocalsAllocator* alloc, State state, TFNode* control, TFNode* effect, uint32_t locals_size) : state(state), control(control), effect(effect), - locals(locals_size, zone) {} + locals(alloc, locals_size) {} SsaEnv(const SsaEnv& other) V8_NOEXCEPT = default; SsaEnv(SsaEnv&& other) V8_NOEXCEPT : state(other.state), @@ -57,12 +101,10 @@ struct SsaEnv : public ZoneObject { void Kill() { state = kUnreachable; - for (TFNode*& local : locals) { - local = nullptr; - } control = nullptr; effect = nullptr; instance_cache = {}; + locals.Clear(); } void SetNotMerged() { if (state == kMerged) state = kReached; @@ -100,20 +142,44 @@ class WasmGraphBuildingInterface { struct Control : public ControlBase { SsaEnv* merge_env = nullptr; // merge environment for the construct. SsaEnv* false_env = nullptr; // false environment (only for if). + SsaEnv* block_env = nullptr; // environment that dies with this block. TryInfo* try_info = nullptr; // information about try statements. int32_t previous_catch = -1; // previous Control with a catch. BitVector* loop_assignments = nullptr; // locals assigned in this loop. 
TFNode* loop_node = nullptr; // loop header of this loop. - MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(Control); + DISALLOW_IMPLICIT_CONSTRUCTORS(Control); template explicit Control(Args&&... args) V8_NOEXCEPT : ControlBase(std::forward(args)...) {} + Control(Control&& other) V8_NOEXCEPT + : ControlBase(std::move(other)), + merge_env(other.merge_env), + false_env(other.false_env), + block_env(other.block_env), + try_info(other.try_info), + previous_catch(other.previous_catch), + loop_assignments(other.loop_assignments), + loop_node(other.loop_node) { + // The `control_` vector in WasmFullDecoder calls destructor of this when + // growing capacity. Nullify these pointers to avoid destroying + // environments before used. + other.false_env = nullptr; + other.block_env = nullptr; + other.try_info = nullptr; + } + ~Control() { + if (false_env) false_env->Kill(); + if (block_env) block_env->Kill(); + if (try_info) try_info->catch_env->Kill(); + } }; WasmGraphBuildingInterface(compiler::WasmGraphBuilder* builder, - int func_index, InlinedStatus inlined_status) - : builder_(builder), + int func_index, InlinedStatus inlined_status, + Zone* zone) + : locals_allocator_(zone), + builder_(builder), func_index_(func_index), inlined_status_(inlined_status) {} @@ -148,7 +214,7 @@ class WasmGraphBuildingInterface { builder_->Start(static_cast(decoder->sig_->parameter_count() + 1 + 1)); uint32_t num_locals = decoder->num_locals(); SsaEnv* ssa_env = decoder->zone()->New( - decoder->zone(), SsaEnv::kReached, effect(), control(), num_locals); + &locals_allocator_, SsaEnv::kReached, effect(), control(), num_locals); SetEnv(ssa_env); // Initialize local variables. Parameters are shifted by 1 because of the @@ -213,12 +279,13 @@ class WasmGraphBuildingInterface { // The branch environment is the outer environment. 
block->merge_env = ssa_env_; SetEnv(Steal(decoder->zone(), ssa_env_)); + block->block_env = ssa_env_; } void Loop(FullDecoder* decoder, Control* block) { // This is the merge environment at the beginning of the loop. SsaEnv* merge_env = Steal(decoder->zone(), ssa_env_); - block->merge_env = merge_env; + block->merge_env = block->block_env = merge_env; SetEnv(merge_env); ssa_env_->state = SsaEnv::kMerged; @@ -279,6 +346,7 @@ class WasmGraphBuildingInterface { } // Now we setup a new environment for the inside of the loop. + // TODO(choongwoo): Clear locals of the following SsaEnv after use. SetEnv(Split(decoder->zone(), ssa_env_)); builder_->StackCheck(decoder->module_->has_shared_memory ? &ssa_env_->instance_cache @@ -296,15 +364,15 @@ class WasmGraphBuildingInterface { void Try(FullDecoder* decoder, Control* block) { SsaEnv* outer_env = ssa_env_; - SsaEnv* catch_env = Split(decoder->zone(), outer_env); - // Mark catch environment as unreachable, since only accessable - // through catch unwinding (i.e. landing pads). - catch_env->state = SsaEnv::kUnreachable; - SsaEnv* try_env = Steal(decoder->zone(), outer_env); + SsaEnv* catch_env = Steal(decoder->zone(), outer_env); + // Steal catch_env to make catch_env unreachable and clear locals. + // The unreachable catch_env will create and copy locals in `Goto`. 
+ SsaEnv* try_env = Steal(decoder->zone(), catch_env); SetEnv(try_env); TryInfo* try_info = decoder->zone()->New(catch_env); block->merge_env = outer_env; block->try_info = try_info; + block->block_env = try_env; } void If(FullDecoder* decoder, const Value& cond, Control* if_block) { @@ -332,6 +400,7 @@ class WasmGraphBuildingInterface { true_env->control = if_true; if_block->merge_env = merge_env; if_block->false_env = false_env; + if_block->block_env = true_env; SetEnv(true_env); } @@ -506,8 +575,9 @@ class WasmGraphBuildingInterface { uint32_t ret_count = static_cast(decoder->sig_->return_count()); NodeVector values(ret_count); SsaEnv* internal_env = ssa_env_; + SsaEnv* exit_env = nullptr; if (emit_loop_exits()) { - SsaEnv* exit_env = Split(decoder->zone(), ssa_env_); + exit_env = Split(decoder->zone(), ssa_env_); SetEnv(exit_env); auto stack_values = CopyStackValues(decoder, ret_count, drop_values); BuildNestedLoopExits(decoder, decoder->control_depth() - 1, false, @@ -523,6 +593,7 @@ class WasmGraphBuildingInterface { builder_->TraceFunctionExit(base::VectorOf(values), decoder->position()); } builder_->Return(base::VectorOf(values)); + if (exit_env) exit_env->Kill(); SetEnv(internal_env); } @@ -532,15 +603,12 @@ class WasmGraphBuildingInterface { } else { Control* target = decoder->control_at(depth); if (emit_loop_exits()) { - SsaEnv* internal_env = ssa_env_; - SsaEnv* exit_env = Split(decoder->zone(), ssa_env_); - SetEnv(exit_env); + ScopedSsaEnv exit_env(this, Split(decoder->zone(), ssa_env_)); uint32_t value_count = target->br_merge()->arity; auto stack_values = CopyStackValues(decoder, value_count, drop_values); BuildNestedLoopExits(decoder, depth, true, stack_values); MergeValuesInto(decoder, target, target->br_merge(), stack_values.data()); - SetEnv(internal_env); } else { MergeValuesInto(decoder, target, target->br_merge(), drop_values); } @@ -567,9 +635,8 @@ class WasmGraphBuildingInterface { break; } builder_->SetControl(fenv->control); - 
SetEnv(tenv); + ScopedSsaEnv scoped_env(this, tenv); BrOrRet(decoder, depth, 1); - SetEnv(fenv); } void BrTable(FullDecoder* decoder, const BranchTableImmediate& imm, @@ -581,23 +648,19 @@ class WasmGraphBuildingInterface { return; } - SsaEnv* branch_env = ssa_env_; // Build branches to the various blocks based on the table. TFNode* sw = builder_->Switch(imm.table_count + 1, key.node); - SsaEnv* copy = Steal(decoder->zone(), branch_env); - SetEnv(copy); BranchTableIterator iterator(decoder, imm); while (iterator.has_next()) { uint32_t i = iterator.cur_index(); uint32_t target = iterator.next(); - SetEnv(Split(decoder->zone(), copy)); + ScopedSsaEnv env(this, Split(decoder->zone(), ssa_env_)); builder_->SetControl(i == imm.table_count ? builder_->IfDefault(sw) : builder_->IfValue(i, sw)); BrOrRet(decoder, target, 1); } DCHECK(decoder->ok()); - SetEnv(branch_env); } void Else(FullDecoder* decoder, Control* if_block) { @@ -857,9 +920,10 @@ class WasmGraphBuildingInterface { builder_->BrOnNull(ref_object.node, &true_env->control, &false_env->control); builder_->SetControl(false_env->control); - SetEnv(true_env); - BrOrRet(decoder, depth, pass_null_along_branch ? 0 : 1); - SetEnv(false_env); + { + ScopedSsaEnv scoped_env(this, true_env); + BrOrRet(decoder, depth, pass_null_along_branch ? 
0 : 1); + } SetAndTypeNode( result_on_fallthrough, builder_->TypeGuard(ref_object.node, result_on_fallthrough->type)); @@ -875,9 +939,8 @@ class WasmGraphBuildingInterface { builder_->BrOnNull(ref_object.node, &false_env->control, &true_env->control); builder_->SetControl(false_env->control); - SetEnv(true_env); + ScopedSsaEnv scoped_env(this, true_env); BrOrRet(decoder, depth, 0); - SetEnv(false_env); } void SimdOp(FullDecoder* decoder, WasmOpcode opcode, base::Vector args, @@ -957,6 +1020,7 @@ class WasmGraphBuildingInterface { SsaEnv* if_catch_env = Steal(decoder->zone(), ssa_env_); if_catch_env->control = if_catch; block->try_info->catch_env = if_no_catch_env; + block->block_env = if_catch_env; // If the tags match we extract the values from the exception object and // push them onto the operand stack using the passed {values} vector. @@ -1270,6 +1334,7 @@ class WasmGraphBuildingInterface { null_succeeds ? kNullable : kNonNullable); WasmTypeCheckConfig config = {object.type, to_type}; SsaEnv* branch_env = Split(decoder->zone(), ssa_env_); + // TODO(choongwoo): Clear locals of `no_branch_env` after use. SsaEnv* no_branch_env = Steal(decoder->zone(), ssa_env_); no_branch_env->SetNotMerged(); SsaEnv* match_env = branch_on_match ? branch_env : no_branch_env; @@ -1280,27 +1345,26 @@ class WasmGraphBuildingInterface { builder_->SetControl(no_branch_env->control); if (branch_on_match) { - SetEnv(branch_env); + ScopedSsaEnv scoped_env(this, branch_env, no_branch_env); // Narrow type for the successful cast target branch. Forward(decoder, object, forwarding_value); // Currently, br_on_* instructions modify the value stack before calling // the interface function, so we don't need to drop any values here. BrOrRet(decoder, br_depth, 0); - - SetEnv(no_branch_env); // Note: Differently to below for !{branch_on_match}, we do not Forward // the value here to perform a TypeGuard. It can't be done here due to // asymmetric decoder code. 
A Forward here would be poped from the stack // and ignored by the decoder. Therefore the decoder has to call Forward // itself. } else { - SetEnv(branch_env); - // It is necessary in case of {null_succeeds} to forward the value. - // This will add a TypeGuard to the non-null type (as in this case the - // object is non-nullable). - Forward(decoder, object, decoder->stack_value(1)); - BrOrRet(decoder, br_depth, 0); - SetEnv(no_branch_env); + { + ScopedSsaEnv scoped_env(this, branch_env, no_branch_env); + // It is necessary in case of {null_succeeds} to forward the value. + // This will add a TypeGuard to the non-null type (as in this case the + // object is non-nullable). + Forward(decoder, object, decoder->stack_value(1)); + BrOrRet(decoder, br_depth, 0); + } // Narrow type for the successful cast fallthrough branch. Forward(decoder, object, forwarding_value); } @@ -1738,6 +1802,7 @@ class WasmGraphBuildingInterface { DanglingExceptions& dangling_exceptions() { return dangling_exceptions_; } private: + LocalsAllocator locals_allocator_; SsaEnv* ssa_env_ = nullptr; compiler::WasmGraphBuilder* builder_; int func_index_; @@ -1753,6 +1818,24 @@ class WasmGraphBuildingInterface { int feedback_instruction_index_ = 0; std::vector type_feedback_; + class V8_NODISCARD ScopedSsaEnv { + public: + ScopedSsaEnv(WasmGraphBuildingInterface* interface, SsaEnv* env, + SsaEnv* next_env = nullptr) + : interface_(interface), + next_env_(next_env ? next_env : interface->ssa_env_) { + interface_->SetEnv(env); + } + ~ScopedSsaEnv() { + interface_->ssa_env_->Kill(); + interface_->SetEnv(next_env_); + } + + private: + WasmGraphBuildingInterface* interface_; + SsaEnv* next_env_; + }; + TFNode* effect() { return builder_->effect(); } TFNode* control() { return builder_->control(); } @@ -1841,13 +1924,15 @@ class WasmGraphBuildingInterface { return node; } + // TODO(choongwoo): Clear locals of `success_env` after use. 
SsaEnv* success_env = Steal(decoder->zone(), ssa_env_); success_env->control = if_success; SsaEnv* exception_env = Split(decoder->zone(), success_env); exception_env->control = if_exception; exception_env->effect = if_exception; - SetEnv(exception_env); + + ScopedSsaEnv scoped_env(this, exception_env, success_env); // If the exceptional operation could have modified memory size, we need to // reload the memory context into the exceptional control path. @@ -1885,8 +1970,6 @@ class WasmGraphBuildingInterface { // generating a LoopExit if needed in the inlining code. dangling_exceptions_.Add(if_exception, effect(), control()); } - - SetEnv(success_env); return node; } @@ -2011,8 +2094,6 @@ class WasmGraphBuildingInterface { ssa_env_->effect = effect(); } SsaEnv* result = zone->New(std::move(*from)); - // Restore the length of {from->locals} after applying move-constructor. - from->locals.resize(result->locals.size()); result->state = SsaEnv::kReached; return result; } @@ -2269,7 +2350,7 @@ DecodeResult BuildTFGraph(AccountingAllocator* allocator, Zone zone(allocator, ZONE_NAME); WasmFullDecoder decoder( &zone, module, enabled, detected, body, builder, func_index, - inlined_status); + inlined_status, &zone); if (node_origins) { builder->AddBytecodePositionDecorator(node_origins, &decoder); } From 8a795a593ea726d4e49ca00f2a19ab85853ddb4d Mon Sep 17 00:00:00 2001 From: Hao Xu Date: Tue, 13 Dec 2022 18:28:13 +0800 Subject: [PATCH 054/654] [csa] Avoid calling to Builtin::StringEqual if possible Check strings length before calling to Builtin::StringEqual. If length is not equal, the strings must also be not equal. 
Change-Id: I4f8c2e72720d0919b3fd57013d06dcc8d83f2ab4 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4090410 Commit-Queue: Hao A Xu Reviewed-by: Igor Sheludko Reviewed-by: Tobias Tebbi Cr-Commit-Position: refs/heads/main@{#85005} --- src/builtins/builtins-collections-gen.cc | 5 +-- src/builtins/builtins-date-gen.cc | 17 +++---- src/builtins/builtins-definitions.h | 2 +- src/builtins/builtins-string-gen.cc | 15 +++---- src/builtins/builtins-string-gen.h | 3 +- src/codegen/code-stub-assembler.cc | 54 ++++++++++++++++++----- src/codegen/code-stub-assembler.h | 23 ++++++++++ src/codegen/interface-descriptors.h | 11 +++++ src/compiler/effect-control-linearizer.cc | 32 +++++++++++--- 9 files changed, 119 insertions(+), 43 deletions(-) diff --git a/src/builtins/builtins-collections-gen.cc b/src/builtins/builtins-collections-gen.cc index 9e78c49adc..76600cc1ca 100644 --- a/src/builtins/builtins-collections-gen.cc +++ b/src/builtins/builtins-collections-gen.cc @@ -1293,10 +1293,7 @@ void CollectionsBuiltinsAssembler::SameValueZeroString( GotoIf(TaggedIsSmi(candidate_key), if_not_same); GotoIfNot(IsString(CAST(candidate_key)), if_not_same); - Branch(TaggedEqual(CallBuiltin(Builtin::kStringEqual, NoContextConstant(), - key_string, candidate_key), - TrueConstant()), - if_same, if_not_same); + BranchIfStringEqual(key_string, CAST(candidate_key), if_same, if_not_same); } void CollectionsBuiltinsAssembler::SameValueZeroBigInt( diff --git a/src/builtins/builtins-date-gen.cc b/src/builtins/builtins-date-gen.cc index f8ca669d1d..e04a18f2b9 100644 --- a/src/builtins/builtins-date-gen.cc +++ b/src/builtins/builtins-date-gen.cc @@ -207,18 +207,11 @@ TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) { // Slow-case with actual string comparisons. 
GotoIf(TaggedIsSmi(hint), &hint_is_invalid); GotoIfNot(IsString(CAST(hint)), &hint_is_invalid); - GotoIf(TaggedEqual( - CallBuiltin(Builtin::kStringEqual, context, hint, number_string), - TrueConstant()), - &hint_is_number); - GotoIf(TaggedEqual( - CallBuiltin(Builtin::kStringEqual, context, hint, default_string), - TrueConstant()), - &hint_is_string); - GotoIf(TaggedEqual( - CallBuiltin(Builtin::kStringEqual, context, hint, string_string), - TrueConstant()), - &hint_is_string); + + TNode hint_length = LoadStringLengthAsWord(CAST(hint)); + GotoIfStringEqual(CAST(hint), hint_length, number_string, &hint_is_number); + GotoIfStringEqual(CAST(hint), hint_length, default_string, &hint_is_string); + GotoIfStringEqual(CAST(hint), hint_length, string_string, &hint_is_string); Goto(&hint_is_invalid); // Use the OrdinaryToPrimitive builtin to convert to a Number. diff --git a/src/builtins/builtins-definitions.h b/src/builtins/builtins-definitions.h index c8deb23d9f..3107fb21e3 100644 --- a/src/builtins/builtins-definitions.h +++ b/src/builtins/builtins-definitions.h @@ -160,7 +160,7 @@ namespace internal { \ /* String helpers */ \ TFC(StringFromCodePointAt, StringAtAsString) \ - TFC(StringEqual, Compare) \ + TFC(StringEqual, StringEqual) \ TFC(StringGreaterThan, Compare) \ TFC(StringGreaterThanOrEqual, Compare) \ TFC(StringLessThan, Compare) \ diff --git a/src/builtins/builtins-string-gen.cc b/src/builtins/builtins-string-gen.cc index 42fed47456..28e74b6b09 100644 --- a/src/builtins/builtins-string-gen.cc +++ b/src/builtins/builtins-string-gen.cc @@ -124,17 +124,15 @@ TNode StringBuiltinsAssembler::SearchOneByteInOneByteString( } void StringBuiltinsAssembler::GenerateStringEqual(TNode left, - TNode right) { + TNode right, + TNode length) { TVARIABLE(String, var_left, left); TVARIABLE(String, var_right, right); Label if_equal(this), if_notequal(this), if_indirect(this, Label::kDeferred), restart(this, {&var_left, &var_right}); - TNode lhs_length = 
LoadStringLengthAsWord(left); - TNode rhs_length = LoadStringLengthAsWord(right); - - // Strings with different lengths cannot be equal. - GotoIf(WordNotEqual(lhs_length, rhs_length), &if_notequal); + CSA_DCHECK(this, IntPtrEqual(LoadStringLengthAsWord(left), length)); + CSA_DCHECK(this, IntPtrEqual(LoadStringLengthAsWord(right), length)); Goto(&restart); BIND(&restart); @@ -144,7 +142,7 @@ void StringBuiltinsAssembler::GenerateStringEqual(TNode left, TNode lhs_instance_type = LoadInstanceType(lhs); TNode rhs_instance_type = LoadInstanceType(rhs); - StringEqual_Core(lhs, lhs_instance_type, rhs, rhs_instance_type, lhs_length, + StringEqual_Core(lhs, lhs_instance_type, rhs, rhs_instance_type, length, &if_equal, &if_notequal, &if_indirect); BIND(&if_indirect); @@ -716,7 +714,8 @@ void StringBuiltinsAssembler::GenerateStringRelationalComparison( TF_BUILTIN(StringEqual, StringBuiltinsAssembler) { auto left = Parameter(Descriptor::kLeft); auto right = Parameter(Descriptor::kRight); - GenerateStringEqual(left, right); + auto length = UncheckedParameter(Descriptor::kLength); + GenerateStringEqual(left, right, length); } TF_BUILTIN(StringLessThan, StringBuiltinsAssembler) { diff --git a/src/builtins/builtins-string-gen.h b/src/builtins/builtins-string-gen.h index 55647b9096..64044440e1 100644 --- a/src/builtins/builtins-string-gen.h +++ b/src/builtins/builtins-string-gen.h @@ -104,7 +104,8 @@ class StringBuiltinsAssembler : public CodeStubAssembler { const TNode search_length, const TNode start_position); - void GenerateStringEqual(TNode left, TNode right); + void GenerateStringEqual(TNode left, TNode right, + TNode length); void GenerateStringRelationalComparison(TNode left, TNode right, Operation op); diff --git a/src/codegen/code-stub-assembler.cc b/src/codegen/code-stub-assembler.cc index 3609a19b19..4a505cdcb8 100644 --- a/src/codegen/code-stub-assembler.cc +++ b/src/codegen/code-stub-assembler.cc @@ -13277,12 +13277,16 @@ TNode CodeStubAssembler::Equal(TNode left, 
TNode right, BIND(&if_left_string); { GotoIfNot(IsStringInstanceType(right_type), &use_symmetry); - result = - CAST(CallBuiltin(Builtin::kStringEqual, context(), left, right)); - CombineFeedback(var_type_feedback, - SmiOr(CollectFeedbackForString(left_type), - CollectFeedbackForString(right_type))); - Goto(&end); + Label combine_feedback(this); + BranchIfStringEqual(CAST(left), CAST(right), &combine_feedback, + &combine_feedback, &result); + BIND(&combine_feedback); + { + CombineFeedback(var_type_feedback, + SmiOr(CollectFeedbackForString(left_type), + CollectFeedbackForString(right_type))); + Goto(&end); + } } BIND(&if_left_number); @@ -13755,9 +13759,7 @@ TNode CodeStubAssembler::StrictEqual( CollectFeedbackForString(rhs_instance_type); *var_type_feedback = SmiOr(lhs_feedback, rhs_feedback); } - result = CAST(CallBuiltin(Builtin::kStringEqual, - NoContextConstant(), lhs, rhs)); - Goto(&end); + BranchIfStringEqual(CAST(lhs), CAST(rhs), &end, &end, &result); } BIND(&if_rhsisnotstring); @@ -13969,6 +13971,36 @@ TNode CodeStubAssembler::StrictEqual( return result.value(); } +void CodeStubAssembler::BranchIfStringEqual(TNode lhs, + TNode lhs_length, + TNode rhs, + TNode rhs_length, + Label* if_true, Label* if_false, + TVariable* result) { + Label length_equal(this), length_not_equal(this); + Branch(IntPtrEqual(lhs_length, rhs_length), &length_equal, &length_not_equal); + + BIND(&length_not_equal); + { + if (result != nullptr) *result = FalseConstant(); + Goto(if_false); + } + + BIND(&length_equal); + { + TNode value = CAST(CallBuiltin( + Builtin::kStringEqual, NoContextConstant(), lhs, rhs, lhs_length)); + if (result != nullptr) { + *result = value; + } + if (if_true == if_false) { + Goto(if_true); + } else { + Branch(TaggedEqual(value, TrueConstant()), if_true, if_false); + } + } +} + // ECMA#sec-samevalue // This algorithm differs from the Strict Equality Comparison Algorithm in its // treatment of signed zeroes and NaNs. 
@@ -14043,9 +14075,7 @@ void CodeStubAssembler::BranchIfSameValue(TNode lhs, TNode rhs, // Now we can only yield true if {rhs} is also a String // with the same sequence of characters. GotoIfNot(IsString(CAST(rhs)), if_false); - const TNode result = CallBuiltin( - Builtin::kStringEqual, NoContextConstant(), lhs, rhs); - Branch(IsTrue(result), if_true, if_false); + BranchIfStringEqual(CAST(lhs), CAST(rhs), if_true, if_false); } BIND(&if_lhsisbigint); diff --git a/src/codegen/code-stub-assembler.h b/src/codegen/code-stub-assembler.h index e59fdef9fc..78689c0835 100644 --- a/src/codegen/code-stub-assembler.h +++ b/src/codegen/code-stub-assembler.h @@ -3739,6 +3739,29 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode StrictEqual(TNode lhs, TNode rhs, TVariable* var_type_feedback = nullptr); + void GotoIfStringEqual(TNode lhs, TNode lhs_length, + TNode rhs, Label* if_true) { + Label if_false(this); + TNode rhs_length = LoadStringLengthAsWord(rhs); + BranchIfStringEqual(lhs, lhs_length, rhs, rhs_length, if_true, &if_false, + nullptr); + + BIND(&if_false); + } + + void BranchIfStringEqual(TNode lhs, TNode rhs, Label* if_true, + Label* if_false, + TVariable* result = nullptr) { + return BranchIfStringEqual(lhs, LoadStringLengthAsWord(lhs), rhs, + LoadStringLengthAsWord(rhs), if_true, if_false, + result); + } + + void BranchIfStringEqual(TNode lhs, TNode lhs_length, + TNode rhs, TNode rhs_length, + Label* if_true, Label* if_false, + TVariable* result = nullptr); + // ECMA#sec-samevalue // Similar to StrictEqual except that NaNs are treated as equal and minus zero // differs from positive zero. 
diff --git a/src/codegen/interface-descriptors.h b/src/codegen/interface-descriptors.h index e71c017d04..e1fea903a0 100644 --- a/src/codegen/interface-descriptors.h +++ b/src/codegen/interface-descriptors.h @@ -55,6 +55,7 @@ namespace internal { V(CloneObjectBaseline) \ V(CloneObjectWithVector) \ V(Compare) \ + V(StringEqual) \ V(Compare_Baseline) \ V(Compare_WithFeedback) \ V(Construct_Baseline) \ @@ -1563,6 +1564,16 @@ class CompareDescriptor static constexpr inline auto registers(); }; +class StringEqualDescriptor + : public StaticCallInterfaceDescriptor { + public: + DEFINE_PARAMETERS(kLeft, kRight, kLength) + DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft + MachineType::AnyTagged(), // kRight + MachineType::IntPtr()) // kLength + DECLARE_DEFAULT_DESCRIPTOR(StringEqualDescriptor) +}; + class BinaryOpDescriptor : public StaticCallInterfaceDescriptor { public: diff --git a/src/compiler/effect-control-linearizer.cc b/src/compiler/effect-control-linearizer.cc index edc968302f..0846517d1c 100644 --- a/src/compiler/effect-control-linearizer.cc +++ b/src/compiler/effect-control-linearizer.cc @@ -4495,6 +4495,33 @@ Node* EffectControlLinearizer::LowerStringLength(Node* node) { return __ LoadField(AccessBuilder::ForStringLength(), subject); } +Node* EffectControlLinearizer::LowerStringEqual(Node* node) { + Callable callable = Builtins::CallableFor(isolate(), Builtin::kStringEqual); + Node* lhs = node->InputAt(0); + Node* rhs = node->InputAt(1); + Node* lhs_length = __ LoadField(AccessBuilder::ForStringLength(), lhs); + Node* rhs_length = __ LoadField(AccessBuilder::ForStringLength(), rhs); + + auto if_length_equal = __ MakeLabel(); + auto done = __ MakeLabel(MachineRepresentation::kTagged); + + __ GotoIf(__ Word32Equal(lhs_length, rhs_length), &if_length_equal); + __ Goto(&done, __ FalseConstant()); + + __ Bind(&if_length_equal); + Operator::Properties properties = Operator::kEliminatable; + CallDescriptor::Flags flags = CallDescriptor::kNoFlags; + auto 
call_descriptor = Linkage::GetStubCallDescriptor( + graph()->zone(), callable.descriptor(), + callable.descriptor().GetStackParameterCount(), flags, properties); + Node* result = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, + rhs, lhs_length, __ NoContextConstant()); + __ Goto(&done, result); + + __ Bind(&done); + return done.PhiAt(0); +} + Node* EffectControlLinearizer::LowerStringComparison(Callable const& callable, Node* node) { Node* lhs = node->InputAt(0); @@ -4525,11 +4552,6 @@ Node* EffectControlLinearizer::LowerStringSubstring(Node* node) { start, end, __ NoContextConstant()); } -Node* EffectControlLinearizer::LowerStringEqual(Node* node) { - return LowerStringComparison( - Builtins::CallableFor(isolate(), Builtin::kStringEqual), node); -} - Node* EffectControlLinearizer::LowerStringLessThan(Node* node) { return LowerStringComparison( Builtins::CallableFor(isolate(), Builtin::kStringLessThan), node); From b74fc574932db0a0daf7d892d8d542c8de6fd560 Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Thu, 22 Dec 2022 19:09:50 -0800 Subject: [PATCH 055/654] Update V8 DEPS (trusted) Rolling v8/build: https://chromium.googlesource.com/chromium/src/build/+log/92221be..ff6be8b Rolling v8/buildtools/third_party/libc++abi/trunk: https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxxabi/+log/df3cc8e..dc82f30 Rolling v8/third_party/catapult: https://chromium.googlesource.com/catapult/+log/1651224..ebbb83f Rolling v8/third_party/depot_tools: https://chromium.googlesource.com/chromium/tools/depot_tools/+log/cf31045..0b96058 Rolling v8/third_party/fuchsia-sdk/sdk: version:11.20221221.3.1..version:11.20221222.1.1 Change-Id: I4d28f339541d7ff5a4f717f92f8a83b511303980 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4121537 Bot-Commit: v8-ci-autoroll-builder Commit-Queue: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#85006} --- DEPS | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) 
diff --git a/DEPS b/DEPS index 8bf011c9db..eea14ae953 100644 --- a/DEPS +++ b/DEPS @@ -65,7 +65,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. - 'fuchsia_version': 'version:11.20221221.3.1', + 'fuchsia_version': 'version:11.20221222.1.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -105,7 +105,7 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '92221beb1ec71e7f6847cf4da4496ad8bb23337c', + Var('chromium_url') + '/chromium/src/build.git' + '@' + 'ff6be8b34de4264c1b963a344ef2ef6ce6b8ef49', 'buildtools': Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '134af4c91bb9ab46fe1165ff1cf0f76900fa5a7e', 'buildtools/clang_format/script': @@ -133,7 +133,7 @@ deps = { 'buildtools/third_party/libc++/trunk': Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '7c5e4b4eb3c5970f5525f62650c5b76f56dd99a8', 'buildtools/third_party/libc++abi/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'df3cc8ecee393c765a7274a4687f8dff3558d590', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'dc82f3042daa8b06d34e51d8492d37ce901a6f8d', 'buildtools/third_party/libunwind/trunk': Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '5e22a7fe2335161ab267867c8e1be481bf6c8300', 'buildtools/win': { @@ -209,7 +209,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + '1651224cd6bc419f6d1a12ee09074daa9f3ebbdc', + 'url': Var('chromium_url') + '/catapult.git' + '@' + 'ebbb83f192fe9ee3214119184001b9ddcfd44fb0', 
'condition': 'checkout_android', }, 'third_party/colorama/src': { @@ -217,7 +217,7 @@ deps = { 'condition': 'checkout_android', }, 'third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'cf31045b347e24e6619f2564fdb0c2490f661745', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '0b96058844728db8040a7348cc4c61fde453401a', 'third_party/fuchsia-sdk/sdk': { 'packages': [ { From 53314cdc78aad73d522c556635291e0fab99e1e1 Mon Sep 17 00:00:00 2001 From: pthier Date: Fri, 23 Dec 2022 09:30:06 +0100 Subject: [PATCH 056/654] Re-enable mjsunit/md5 for future/maglev The issue causing this test to fail on the usban bot was fixed with https://crrev.com/fcda478d890caea6bf04a50e6106682b64cf8d5a Bug: v8:13611, v8:13612 Change-Id: I1d0a713160cdddf8623e767d43277a988509bce0 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4110955 Auto-Submit: Patrick Thier Reviewed-by: Toon Verwaest Commit-Queue: Patrick Thier Cr-Commit-Position: refs/heads/main@{#85007} --- test/mjsunit/mjsunit.status | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status index 9f3dba4754..b69d19df1b 100644 --- a/test/mjsunit/mjsunit.status +++ b/test/mjsunit/mjsunit.status @@ -1507,16 +1507,6 @@ 'never-optimize': [SKIP], }], # variant == maglev -############################################################################## -['variant == future', { - # Because future implies maglev, tests that fail under maglev now also fail - # under future. Thus this section likely supercedes the maglev section above. - # TODO(adamk): Remove the maglev section in preference to this one? 
- - # https://crbug.com/v8/13612 - 'md5': [SKIP], -}], # variant == future - ############################################################################## ['no_simd_hardware == True', { 'wasm/exceptions-simd': [SKIP], From 3fddd2e9a6e2af30d9daef22d578e46c4c544530 Mon Sep 17 00:00:00 2001 From: pthier Date: Fri, 23 Dec 2022 11:37:28 +0100 Subject: [PATCH 057/654] [maglev][arm64] Fix Int32DecrementWithOverflow Source register has to be 32-bit. Bug: v8:7700 Change-Id: Ica2a51de100d81bc36447988f6508800f87379ae Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4118740 Auto-Submit: Patrick Thier Commit-Queue: Victor Gomes Reviewed-by: Victor Gomes Commit-Queue: Patrick Thier Cr-Commit-Position: refs/heads/main@{#85008} --- src/maglev/arm64/maglev-ir-arm64.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index d4f2aa2365..84170d83a5 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -109,7 +109,7 @@ void Int32DecrementWithOverflow::SetValueLocationConstraints() { void Int32DecrementWithOverflow::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { - Register value = ToRegister(value_input()); + Register value = ToRegister(value_input()).W(); Register out = ToRegister(result()).W(); __ Subs(out, value, Immediate(1)); // Output register must not be a register input into the eager deopt info. From 6f4a3e0c2f0d877a9bdbc319797e2863b833d306 Mon Sep 17 00:00:00 2001 From: pthier Date: Fri, 23 Dec 2022 11:53:23 +0100 Subject: [PATCH 058/654] [maglev][arm64] Fix calls to TF builtins partly consuming stack args Some TF builtins (*_WithFeedback variants) consume some of the stack arguments and pass the rest through to ASM builtins. We need to seperately align the stack arguments consumed by the TF builtin and the arguments passed to the ASM builtins via TailCall. 
Bug: v8:7700 Change-Id: I26de558fd961cb35b5c642430f9e2c8523ecf096 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4118741 Reviewed-by: Victor Gomes Commit-Queue: Patrick Thier Cr-Commit-Position: refs/heads/main@{#85009} --- src/maglev/maglev-ir.cc | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/src/maglev/maglev-ir.cc b/src/maglev/maglev-ir.cc index 45a5bb9916..86f012cb54 100644 --- a/src/maglev/maglev-ir.cc +++ b/src/maglev/maglev-ir.cc @@ -1936,7 +1936,13 @@ void CallBuiltin::PushFeedbackAndArguments(MaglevAssembler* masm) { PushArguments(masm); } else if (vector_index == descriptor.GetRegisterParameterCount()) { PassFeedbackSlotInRegister(masm); - PushArguments(masm, feedback().vector); + DCHECK_EQ(descriptor.GetStackArgumentOrder(), StackArgumentOrder::kJS); + // Ensure that the builtin only expects the feedback vector on the stack and + // potentional additional var args are passed through to another builtin. + // This is required to align the stack correctly (e.g. on arm64). + DCHECK_EQ(descriptor.GetStackParameterCount(), 1); + PushArguments(masm); + __ Push(feedback().vector); } else { int slot = feedback().index(); Handle vector = feedback().vector; @@ -2026,12 +2032,15 @@ void CallWithSpread::GenerateCode(MaglevAssembler* masm, if (feedback_.IsValid()) { using D = CallInterfaceDescriptorFor::type; + __ PushReverse(base::make_iterator_range(args_no_spread_begin(), + args_no_spread_end())); + // Receiver needs to be pushed (aligned) separately as it is consumed by + // CallWithSpread_WithFeedback directly while the other arguments on the + // stack are passed through to CallWithSpread. static_assert(D::GetStackParameterIndex(D::kReceiver) == 0); static_assert(D::GetStackParameterCount() == 1); - // Push the receiver twice, as we need it for CallCollectFeedback() and the - // actual call. 
- __ PushReverse(receiver(), base::make_iterator_range(args_no_spread_begin(), - args_no_spread_end())); + __ Push(receiver()); + __ Move(D::GetRegisterParameter(D::kArgumentsCount), num_args_no_spread()); __ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector); __ Move(D::GetRegisterParameter(D::kSlot), feedback().index()); From 6b2c271cfb7d96b7db47c2d4ab36751b07332a8e Mon Sep 17 00:00:00 2001 From: Michael Achenbach Date: Fri, 23 Dec 2022 13:08:27 +0100 Subject: [PATCH 059/654] [foozzie] Temporarily lower the amount of --future tests Drop --future from 25% to 5% for a few days until all currently open correctness cases associated with --future are fixed. No-Try: true Bug: v8:7700 Change-Id: I161a0adbc767c5cec46409443fe58c634531487c Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4114292 Reviewed-by: Toon Verwaest Commit-Queue: Michael Achenbach Cr-Commit-Position: refs/heads/main@{#85010} --- tools/clusterfuzz/foozzie/v8_fuzz_flags.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/clusterfuzz/foozzie/v8_fuzz_flags.json b/tools/clusterfuzz/foozzie/v8_fuzz_flags.json index c7e39668c8..3547de27e7 100644 --- a/tools/clusterfuzz/foozzie/v8_fuzz_flags.json +++ b/tools/clusterfuzz/foozzie/v8_fuzz_flags.json @@ -10,7 +10,7 @@ [0.01, "--thread-pool-size=4"], [0.01, "--thread-pool-size=8"], [0.1, "--interrupt-budget=1000"], - [0.25, "--future"], + [0.05, "--future"], [0.2, "--no-regexp-tier-up"], [0.1, "--regexp-interpret-all"], [0.1, "--regexp-tier-up-ticks=10"], From 0d9bacb958ea7d4bc41df913430a80967988a1c5 Mon Sep 17 00:00:00 2001 From: pthier Date: Fri, 23 Dec 2022 12:36:17 +0100 Subject: [PATCH 060/654] [maglev][arm64] Port Construct and ConstructWithSpread Bug: v8:7700 Change-Id: Icc26c0081cab1f468829d464a36c2b524be8e251 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4120033 Reviewed-by: Victor Gomes Commit-Queue: Patrick Thier Cr-Commit-Position: refs/heads/main@{#85011} --- 
src/maglev/arm64/maglev-ir-arm64.cc | 2 - src/maglev/maglev-ir.cc | 82 +++++++++++++++++++++++++++++ src/maglev/maglev-ir.h | 14 ++++- src/maglev/x64/maglev-ir-x64.cc | 76 -------------------------- 4 files changed, 94 insertions(+), 80 deletions(-) diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index 84170d83a5..1817783108 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -119,8 +119,6 @@ void Int32DecrementWithOverflow::GenerateCode(MaglevAssembler* masm, } UNIMPLEMENTED_NODE_WITH_CALL(Float64Ieee754Unary) -UNIMPLEMENTED_NODE_WITH_CALL(Construct) -UNIMPLEMENTED_NODE_WITH_CALL(ConstructWithSpread) UNIMPLEMENTED_NODE_WITH_CALL(ConvertReceiver, mode_) UNIMPLEMENTED_NODE(LoadSignedIntDataViewElement, type_) UNIMPLEMENTED_NODE(LoadDoubleDataViewElement) diff --git a/src/maglev/maglev-ir.cc b/src/maglev/maglev-ir.cc index 86f012cb54..4bf0d5cce3 100644 --- a/src/maglev/maglev-ir.cc +++ b/src/maglev/maglev-ir.cc @@ -2082,6 +2082,88 @@ void CallWithArrayLike::GenerateCode(MaglevAssembler* masm, masm->DefineExceptionHandlerAndLazyDeoptPoint(this); } +// --- +// Arch agnostic construct nodes +// --- + +int Construct::MaxCallStackArgs() const { + using D = Construct_WithFeedbackDescriptor; + return num_args() + D::GetStackParameterCount(); +} +void Construct::SetValueLocationConstraints() { + using D = Construct_WithFeedbackDescriptor; + UseFixed(function(), D::GetRegisterParameter(D::kTarget)); + UseFixed(new_target(), D::GetRegisterParameter(D::kNewTarget)); + UseFixed(context(), kContextRegister); + for (int i = 0; i < num_args(); i++) { + UseAny(arg(i)); + } + DefineAsFixed(this, kReturnRegister0); +} +void Construct::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + using D = Construct_WithFeedbackDescriptor; + DCHECK_EQ(ToRegister(function()), D::GetRegisterParameter(D::kTarget)); + DCHECK_EQ(ToRegister(new_target()), D::GetRegisterParameter(D::kNewTarget)); + 
DCHECK_EQ(ToRegister(context()), kContextRegister); + + __ PushReverse(base::make_iterator_range(args_begin(), args_end())); + // Feedback needs to be pushed (aligned) separately as it is consumed by + // Construct_WithFeedback directly while the other arguments on the stack + // are passed through to Construct. + static_assert(D::GetStackParameterIndex(D::kFeedbackVector) == 0); + static_assert(D::GetStackParameterCount() == 1); + __ Push(feedback().vector); + + __ Move(D::GetRegisterParameter(D::kActualArgumentsCount), num_args()); + __ Move(D::GetRegisterParameter(D::kSlot), feedback().index()); + + __ CallBuiltin(Builtin::kConstruct_WithFeedback); + masm->DefineExceptionHandlerAndLazyDeoptPoint(this); +} + +int ConstructWithSpread::MaxCallStackArgs() const { + int argc_no_spread = num_args() - 1; + using D = CallInterfaceDescriptorFor< + Builtin::kConstructWithSpread_WithFeedback>::type; + return argc_no_spread + D::GetStackParameterCount(); +} +void ConstructWithSpread::SetValueLocationConstraints() { + using D = CallInterfaceDescriptorFor< + Builtin::kConstructWithSpread_WithFeedback>::type; + UseFixed(function(), D::GetRegisterParameter(D::kTarget)); + UseFixed(new_target(), D::GetRegisterParameter(D::kNewTarget)); + UseFixed(context(), kContextRegister); + for (int i = 0; i < num_args() - 1; i++) { + UseAny(arg(i)); + } + UseFixed(spread(), D::GetRegisterParameter(D::kSpread)); + DefineAsFixed(this, kReturnRegister0); +} +void ConstructWithSpread::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + using D = CallInterfaceDescriptorFor< + Builtin::kConstructWithSpread_WithFeedback>::type; + DCHECK_EQ(ToRegister(function()), D::GetRegisterParameter(D::kTarget)); + DCHECK_EQ(ToRegister(new_target()), D::GetRegisterParameter(D::kNewTarget)); + DCHECK_EQ(ToRegister(context()), kContextRegister); + __ PushReverse( + base::make_iterator_range(args_no_spread_begin(), args_no_spread_end())); + + // Feedback needs to be pushed (aligned) separately 
as it is consumed by + // Construct_WithFeedback directly while the other arguments on the stack + // are passed through to Construct. + static_assert(D::GetStackParameterIndex(D::kFeedbackVector) == 0); + static_assert(D::GetStackParameterCount() == 1); + __ Push(feedback().vector); + + __ Move(D::GetRegisterParameter(D::kActualArgumentsCount), + num_args_no_spread()); + __ Move(D::GetRegisterParameter(D::kSlot), feedback().index()); + __ CallBuiltin(Builtin::kConstructWithSpread_WithFeedback); + masm->DefineExceptionHandlerAndLazyDeoptPoint(this); +} + // --- // Arch agnostic control nodes // --- diff --git a/src/maglev/maglev-ir.h b/src/maglev/maglev-ir.h index 96177c4bb2..3a9ab04289 100644 --- a/src/maglev/maglev-ir.h +++ b/src/maglev/maglev-ir.h @@ -4960,6 +4960,9 @@ class Construct : public ValueNodeT { void set_arg(int i, ValueNode* node) { set_input(i + kFixedInputCount, node); } + auto args_begin() { return std::make_reverse_iterator(&arg(-1)); } + auto args_end() { return std::make_reverse_iterator(&arg(num_args() - 1)); } + compiler::FeedbackSource feedback() const { return feedback_; } void VerifyInputs(MaglevGraphLabeller* graph_labeller) const; @@ -5156,8 +5159,7 @@ class CallWithSpread : public ValueNodeT { } Input& spread() { // Spread is the last argument/input. 
- DCHECK_GT(num_args(), 0); - return arg(num_args() - 1); + return input(input_count() - 1); } Input& receiver() { return arg(0); } compiler::FeedbackSource feedback() const { return feedback_; } @@ -5282,6 +5284,10 @@ class ConstructWithSpread : public ValueNodeT { Input& context() { return input(kContextIndex); } const Input& context() const { return input(kContextIndex); } int num_args() const { return input_count() - kFixedInputCount; } + int num_args_no_spread() const { + DCHECK_GT(num_args(), 0); + return num_args() - 1; + } Input& arg(int i) { return input(i + kFixedInputCount); } void set_arg(int i, ValueNode* node) { set_input(i + kFixedInputCount, node); @@ -5290,6 +5296,10 @@ class ConstructWithSpread : public ValueNodeT { // Spread is the last argument/input. return input(input_count() - 1); } + auto args_no_spread_begin() { return std::make_reverse_iterator(&arg(-1)); } + auto args_no_spread_end() { + return std::make_reverse_iterator(&arg(num_args_no_spread() - 1)); + } compiler::FeedbackSource feedback() const { return feedback_; } void VerifyInputs(MaglevGraphLabeller* graph_labeller) const; diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index 7101871b68..2bd00dbdd7 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -2478,82 +2478,6 @@ void CheckedTruncateFloat64ToUint32::GenerateCode( __ bind(&check_done); } -int Construct::MaxCallStackArgs() const { - using D = Construct_WithFeedbackDescriptor; - return num_args() + D::GetStackParameterCount(); -} -void Construct::SetValueLocationConstraints() { - using D = Construct_WithFeedbackDescriptor; - UseFixed(function(), D::GetRegisterParameter(D::kTarget)); - UseFixed(new_target(), D::GetRegisterParameter(D::kNewTarget)); - UseFixed(context(), kContextRegister); - for (int i = 0; i < num_args(); i++) { - UseAny(arg(i)); - } - DefineAsFixed(this, kReturnRegister0); -} -void Construct::GenerateCode(MaglevAssembler* masm, - const 
ProcessingState& state) { - using D = Construct_WithFeedbackDescriptor; - DCHECK_EQ(ToRegister(function()), D::GetRegisterParameter(D::kTarget)); - DCHECK_EQ(ToRegister(new_target()), D::GetRegisterParameter(D::kNewTarget)); - DCHECK_EQ(ToRegister(context()), kContextRegister); - - for (int i = num_args() - 1; i >= 0; --i) { - __ Push(arg(i)); - } - static_assert(D::GetStackParameterIndex(D::kFeedbackVector) == 0); - static_assert(D::GetStackParameterCount() == 1); - __ Push(feedback().vector); - - uint32_t arg_count = num_args(); - __ Move(D::GetRegisterParameter(D::kActualArgumentsCount), arg_count); - __ Move(D::GetRegisterParameter(D::kSlot), feedback().index()); - - __ CallBuiltin(Builtin::kConstruct_WithFeedback); - masm->DefineExceptionHandlerAndLazyDeoptPoint(this); -} - -int ConstructWithSpread::MaxCallStackArgs() const { - int argc_no_spread = num_args() - 1; - using D = CallInterfaceDescriptorFor< - Builtin::kConstructWithSpread_WithFeedback>::type; - return argc_no_spread + D::GetStackParameterCount(); -} -void ConstructWithSpread::SetValueLocationConstraints() { - using D = CallInterfaceDescriptorFor< - Builtin::kConstructWithSpread_WithFeedback>::type; - UseFixed(function(), D::GetRegisterParameter(D::kTarget)); - UseFixed(new_target(), D::GetRegisterParameter(D::kNewTarget)); - UseFixed(context(), kContextRegister); - for (int i = 0; i < num_args() - 1; i++) { - UseAny(arg(i)); - } - UseFixed(spread(), D::GetRegisterParameter(D::kSpread)); - DefineAsFixed(this, kReturnRegister0); -} -void ConstructWithSpread::GenerateCode(MaglevAssembler* masm, - const ProcessingState& state) { - using D = CallInterfaceDescriptorFor< - Builtin::kConstructWithSpread_WithFeedback>::type; - DCHECK_EQ(ToRegister(function()), D::GetRegisterParameter(D::kTarget)); - DCHECK_EQ(ToRegister(new_target()), D::GetRegisterParameter(D::kNewTarget)); - DCHECK_EQ(ToRegister(context()), kContextRegister); - // Push other arguments (other than the spread) to the stack. 
- int argc_no_spread = num_args() - 1; - for (int i = argc_no_spread - 1; i >= 0; --i) { - __ Push(arg(i)); - } - static_assert(D::GetStackParameterIndex(D::kFeedbackVector) == 0); - static_assert(D::GetStackParameterCount() == 1); - __ Push(feedback().vector); - - __ Move(D::GetRegisterParameter(D::kActualArgumentsCount), argc_no_spread); - __ Move(D::GetRegisterParameter(D::kSlot), feedback().index()); - __ CallBuiltin(Builtin::kConstructWithSpread_WithFeedback); - masm->DefineExceptionHandlerAndLazyDeoptPoint(this); -} - int ConvertReceiver::MaxCallStackArgs() const { using D = CallInterfaceDescriptorFor::type; return D::GetStackParameterCount(); From 0c7da9f0eee5310626f2addc2b2d83d31f157f82 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olivier=20Fl=C3=BCckiger?= Date: Fri, 23 Dec 2022 11:01:09 +0000 Subject: [PATCH 061/654] [static-roots] Branchless access to the SoleReadOnlyHeap MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Many locations use the generic GetReadOnlyRoots() function to get access to read only roots. Prominently this happens in Object::IsUndefined and friends. The function is quite inefficient on shared read only root build configurations as it always has to check if the shared read only heap is already initialized or not. However, the heap is only uninitialized during bootstrapping. This CL replaces the branch by a CHECK and deals with the fallout twofold. First, the SoleReadOnlyHeap is initialized earlier, right after all heap objects have been created. Second, any location that calls GetReadOnlyRoots() during initialization is fixed to access the isolate-local read only roots table instead of the shared one. 
Bug: v8:13466 Change-Id: I03e3e68eeefc8651818700629f69ff6163ceb6ac Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4116776 Commit-Queue: Olivier Flückiger Reviewed-by: Jakob Linke Reviewed-by: Toon Verwaest Auto-Submit: Olivier Flückiger Cr-Commit-Position: refs/heads/main@{#85012} --- src/heap/factory.cc | 15 ++++++++++----- src/heap/read-only-heap-inl.h | 14 ++++++++------ src/heap/read-only-heap.cc | 12 +++++++++--- src/heap/read-only-heap.h | 7 +++++-- src/heap/setup-heap-internal.cc | 34 +++++++++++++++++---------------- src/objects/fixed-array-inl.h | 4 ++-- src/objects/hash-table-inl.h | 3 ++- src/objects/heap-object.h | 4 ++++ src/objects/map-inl.h | 8 ++++++++ src/objects/map.h | 5 +++++ src/objects/objects-inl.h | 4 ++++ src/objects/objects.cc | 2 +- src/objects/string.cc | 2 +- 13 files changed, 77 insertions(+), 37 deletions(-) diff --git a/src/heap/factory.cc b/src/heap/factory.cc index 4b115eb041..c89fb56ba0 100644 --- a/src/heap/factory.cc +++ b/src/heap/factory.cc @@ -1230,8 +1230,15 @@ Symbol Factory::NewSymbolInternal(AllocationType allocation) { int hash = isolate()->GenerateIdentityHash(Name::HashBits::kMax); symbol.set_raw_hash_field( Name::CreateHashFieldValue(hash, Name::HashFieldType::kHash)); - symbol.set_description(read_only_roots().undefined_value(), - SKIP_WRITE_BARRIER); + if (isolate()->read_only_heap()->roots_init_complete()) { + symbol.set_description(read_only_roots().undefined_value(), + SKIP_WRITE_BARRIER); + } else { + // Can't use setter during bootstrapping as its typecheck tries to access + // the roots table before it is initialized. 
+ TaggedField::store(symbol, Symbol::kDescriptionOffset, + read_only_roots().undefined_value()); + } symbol.set_flags(0); DCHECK(!symbol.is_private()); return symbol; @@ -2075,9 +2082,7 @@ Map Factory::InitializeMap(Map map, InstanceType type, int instance_size, map.set_bit_field3(bit_field3); map.set_instance_type(type); ReadOnlyRoots ro_roots(roots); - HeapObject raw_null_value = ro_roots.null_value(); - map.set_prototype(raw_null_value, SKIP_WRITE_BARRIER); - map.set_constructor_or_back_pointer(raw_null_value, SKIP_WRITE_BARRIER); + map.init_prototype_and_constructor_or_back_pointer(ro_roots); map.set_instance_size(instance_size); if (map.IsJSObjectMap()) { DCHECK(!ReadOnlyHeap::Contains(map)); diff --git a/src/heap/read-only-heap-inl.h b/src/heap/read-only-heap-inl.h index 0c12828584..c3528c57b1 100644 --- a/src/heap/read-only-heap-inl.h +++ b/src/heap/read-only-heap-inl.h @@ -19,14 +19,16 @@ ReadOnlyRoots ReadOnlyHeap::GetReadOnlyRoots(HeapObject object) { Isolate::FromRootAddress(GetIsolateRootAddress(object.ptr()))); #else #ifdef V8_SHARED_RO_HEAP - // This fails if we are creating heap objects and the roots haven't yet been - // copied into the read-only heap. auto* shared_ro_heap = SoleReadOnlyHeap::shared_ro_heap_; - if (shared_ro_heap != nullptr && shared_ro_heap->init_complete_) { - return ReadOnlyRoots(shared_ro_heap->read_only_roots_); - } -#endif // V8_SHARED_RO_HEAP + // If this check fails in code that runs during initialization make sure to + // load the ReadOnlyRoots from an isolate instead. + // TODO(olivf, v8:13466): Relax this to a DCHECK once we are sure we got it + // right everywhere. 
+ CHECK(shared_ro_heap && shared_ro_heap->roots_init_complete()); + return ReadOnlyRoots(shared_ro_heap->read_only_roots_); +#else return ReadOnlyRoots(GetHeapFromWritableObject(object)); +#endif // V8_SHARED_RO_HEAP #endif // V8_COMPRESS_POINTERS } diff --git a/src/heap/read-only-heap.cc b/src/heap/read-only-heap.cc index 6c93d78c3f..9d47c6ccf9 100644 --- a/src/heap/read-only-heap.cc +++ b/src/heap/read-only-heap.cc @@ -120,9 +120,17 @@ void ReadOnlyHeap::DeserializeIntoIsolate(Isolate* isolate, DCHECK_NOT_NULL(read_only_snapshot_data); ReadOnlyDeserializer des(isolate, read_only_snapshot_data, can_rehash); des.DeserializeIntoIsolate(); + OnCreateRootsComplete(isolate); InitFromIsolate(isolate); } +void ReadOnlyHeap::OnCreateRootsComplete(Isolate* isolate) { + DCHECK_NOT_NULL(isolate); + DCHECK(!roots_init_complete_); + if (IsReadOnlySpaceShared()) InitializeFromIsolateRoots(isolate); + roots_init_complete_ = true; +} + void ReadOnlyHeap::OnCreateHeapObjectsComplete(Isolate* isolate) { DCHECK_NOT_NULL(isolate); InitFromIsolate(isolate); @@ -173,10 +181,9 @@ void SoleReadOnlyHeap::InitializeFromIsolateRoots(Isolate* isolate) { } void ReadOnlyHeap::InitFromIsolate(Isolate* isolate) { - DCHECK(!init_complete_); + DCHECK(roots_init_complete_); read_only_space_->ShrinkPages(); if (IsReadOnlySpaceShared()) { - InitializeFromIsolateRoots(isolate); std::shared_ptr artifacts( *read_only_artifacts_.Pointer()); @@ -191,7 +198,6 @@ void ReadOnlyHeap::InitFromIsolate(Isolate* isolate) { } else { read_only_space_->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap); } - init_complete_ = true; } void ReadOnlyHeap::OnHeapTearDown(Heap* heap) { diff --git a/src/heap/read-only-heap.h b/src/heap/read-only-heap.h index 4c8af51fc9..9243937eed 100644 --- a/src/heap/read-only-heap.h +++ b/src/heap/read-only-heap.h @@ -57,6 +57,9 @@ class ReadOnlyHeap { // a deserializer was not previously provided to Setup. 
When V8_SHARED_RO_HEAP // is enabled, this releases the ReadOnlyHeap creation lock. void OnCreateHeapObjectsComplete(Isolate* isolate); + // Indicates that all objects reachable by the read only roots table have been + // set up. + void OnCreateRootsComplete(Isolate* isolate); // Indicates that the current isolate no longer requires the read-only heap // and it may be safely disposed of. virtual void OnHeapTearDown(Heap* heap); @@ -96,7 +99,7 @@ class ReadOnlyHeap { virtual void InitializeFromIsolateRoots(Isolate* isolate) {} virtual bool IsOwnedByIsolate() { return true; } - bool init_complete() { return init_complete_; } + bool roots_init_complete() const { return roots_init_complete_; } protected: friend class ReadOnlyArtifacts; @@ -117,7 +120,7 @@ class ReadOnlyHeap { // (unless sharing is disabled). void InitFromIsolate(Isolate* isolate); - bool init_complete_ = false; + bool roots_init_complete_ = false; ReadOnlySpace* read_only_space_ = nullptr; std::vector read_only_object_cache_; diff --git a/src/heap/setup-heap-internal.cc b/src/heap/setup-heap-internal.cc index 5ce25f16ea..07f62ff1d7 100644 --- a/src/heap/setup-heap-internal.cc +++ b/src/heap/setup-heap-internal.cc @@ -89,8 +89,9 @@ bool IsMutableMap(InstanceType instance_type, ElementsKind elements_kind) { bool SetupIsolateDelegate::SetupHeapInternal(Isolate* isolate) { auto heap = isolate->heap(); - if (!isolate->read_only_heap()->init_complete()) { + if (!isolate->read_only_heap()->roots_init_complete()) { if (!heap->CreateReadOnlyHeapObjects()) return false; + isolate->read_only_heap()->OnCreateRootsComplete(isolate); } #ifdef DEBUG auto ro_size = heap->read_only_space()->Size(); @@ -249,8 +250,7 @@ void Heap::FinalizePartialMap(Map map) { map.set_dependent_code(DependentCode::empty_dependent_code(roots)); map.set_raw_transitions(MaybeObject::FromSmi(Smi::zero())); map.SetInstanceDescriptors(isolate(), roots.empty_descriptor_array(), 0); - map.set_prototype(roots.null_value()); - 
map.set_constructor_or_back_pointer(roots.null_value()); + map.init_prototype_and_constructor_or_back_pointer(roots); } AllocationResult Heap::Allocate(Handle map, @@ -599,10 +599,11 @@ bool Heap::CreateInitialReadOnlyMaps() { ArrayList::SizeFor(ArrayList::kFirstIndex), AllocationType::kReadOnly); if (!alloc.To(&obj)) return false; obj.set_map_after_allocation(roots.array_list_map(), SKIP_WRITE_BARRIER); - ArrayList::cast(obj).set_length(ArrayList::kFirstIndex); - ArrayList::cast(obj).SetLength(0); + // Unchecked to skip failing checks since required roots are uninitialized. + ArrayList::unchecked_cast(obj).set_length(ArrayList::kFirstIndex); + ArrayList::unchecked_cast(obj).SetLength(0); } - set_empty_array_list(ArrayList::cast(obj)); + set_empty_array_list(ArrayList::unchecked_cast(obj)); { AllocationResult alloc = @@ -861,19 +862,19 @@ void Heap::CreateInitialReadOnlyObjects() { { HandleScope handle_scope(isolate()); -#define PUBLIC_SYMBOL_INIT(_, name, description) \ - Handle name = factory->NewSymbol(AllocationType::kReadOnly); \ - Handle name##d = factory->InternalizeUtf8String(#description); \ - name->set_description(*name##d); \ +#define PUBLIC_SYMBOL_INIT(_, name, description) \ + Handle name = factory->NewSymbol(AllocationType::kReadOnly); \ + Handle name##d = factory->InternalizeUtf8String(#description); \ + TaggedField::store(*name, Symbol::kDescriptionOffset, *name##d); \ roots_table()[RootIndex::k##name] = name->ptr(); PUBLIC_SYMBOL_LIST_GENERATOR(PUBLIC_SYMBOL_INIT, /* not used */) -#define WELL_KNOWN_SYMBOL_INIT(_, name, description) \ - Handle name = factory->NewSymbol(AllocationType::kReadOnly); \ - Handle name##d = factory->InternalizeUtf8String(#description); \ - name->set_is_well_known_symbol(true); \ - name->set_description(*name##d); \ +#define WELL_KNOWN_SYMBOL_INIT(_, name, description) \ + Handle name = factory->NewSymbol(AllocationType::kReadOnly); \ + Handle name##d = factory->InternalizeUtf8String(#description); \ + 
name->set_is_well_known_symbol(true); \ + TaggedField::store(*name, Symbol::kDescriptionOffset, *name##d); \ roots_table()[RootIndex::k##name] = name->ptr(); WELL_KNOWN_SYMBOL_LIST_GENERATOR(WELL_KNOWN_SYMBOL_INIT, /* not used */) @@ -1023,7 +1024,8 @@ void Heap::CreateInitialMutableObjects() { set_number_string_cache(*factory->NewFixedArray( kInitialNumberStringCacheSize * 2, AllocationType::kOld)); - set_basic_block_profiling_data(roots.empty_array_list()); + // Unchecked to skip failing checks since required roots are uninitialized. + set_basic_block_profiling_data(roots.unchecked_empty_array_list()); // Allocate cache for string split and regexp-multiple. set_string_split_cache(*factory->NewFixedArray( diff --git a/src/objects/fixed-array-inl.h b/src/objects/fixed-array-inl.h index 26ffe04941..413cd860e0 100644 --- a/src/objects/fixed-array-inl.h +++ b/src/objects/fixed-array-inl.h @@ -84,7 +84,7 @@ bool FixedArray::is_the_hole(Isolate* isolate, int index) { } void FixedArray::set(int index, Smi value) { - DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map()); + DCHECK_NE(map(), EarlyGetReadOnlyRoots().unchecked_fixed_cow_array_map()); DCHECK_LT(static_cast(index), static_cast(length())); DCHECK(Object(value).IsSmi()); int offset = OffsetOfElementAt(index); @@ -92,7 +92,7 @@ void FixedArray::set(int index, Smi value) { } void FixedArray::set(int index, Object value) { - DCHECK_NE(GetReadOnlyRoots().fixed_cow_array_map(), map()); + DCHECK_NE(EarlyGetReadOnlyRoots().unchecked_fixed_cow_array_map(), map()); DCHECK(IsFixedArray()); DCHECK_LT(static_cast(index), static_cast(length())); int offset = OffsetOfElementAt(index); diff --git a/src/objects/hash-table-inl.h b/src/objects/hash-table-inl.h index 311d2fac17..c54f973899 100644 --- a/src/objects/hash-table-inl.h +++ b/src/objects/hash-table-inl.h @@ -192,7 +192,8 @@ InternalIndex HashTable::FindInsertionEntry(IsolateT* isolate, template bool HashTable::IsKey(ReadOnlyRoots roots, Object k) { // TODO(leszeks): 
Dictionaries that don't delete could skip the hole check. - return k != roots.undefined_value() && k != roots.the_hole_value(); + return k != roots.unchecked_undefined_value() && + k != roots.unchecked_the_hole_value(); } template diff --git a/src/objects/heap-object.h b/src/objects/heap-object.h index e1473ae473..3aabaa4503 100644 --- a/src/objects/heap-object.h +++ b/src/objects/heap-object.h @@ -86,6 +86,10 @@ class HeapObject : public Object { // This version is intended to be used for the isolate values produced by // i::GetPtrComprCageBase(HeapObject) function which may return nullptr. inline ReadOnlyRoots GetReadOnlyRoots(PtrComprCageBase cage_base) const; + // This is slower, but safe to call during bootstrapping. On shared read only + // heap configurations it returns the current isolates roots table as opposed + // to the shared one. + inline ReadOnlyRoots EarlyGetReadOnlyRoots() const; // Whether the object is in the RO heap and the RO heap is shared, or in the // writable shared heap. diff --git a/src/objects/map-inl.h b/src/objects/map-inl.h index a83a060337..6876284a5c 100644 --- a/src/objects/map-inl.h +++ b/src/objects/map-inl.h @@ -66,6 +66,14 @@ DEF_GETTER(Map, prototype_info, Object) { RELEASE_ACQUIRE_ACCESSORS(Map, prototype_info, Object, kTransitionsOrPrototypeInfoOffset) +void Map::init_prototype_and_constructor_or_back_pointer(ReadOnlyRoots roots) { + HeapObject null = roots.null_value(); + TaggedField::store(*this, + null); + TaggedField::store(*this, null); +} + // |bit_field| fields. // Concurrent access to |has_prototype_slot| and |has_non_instance_prototype| // is explicitly allowlisted here. 
The former is never modified after the map diff --git a/src/objects/map.h b/src/objects/map.h index 6914a51150..73f341c99e 100644 --- a/src/objects/map.h +++ b/src/objects/map.h @@ -568,6 +568,11 @@ class Map : public TorqueGeneratedMap { Isolate* isolate, Handle map, Handle prototype, bool enable_prototype_setup_mode = true); + // Sets prototype and constructor fields to null. Can be called during + // bootstrapping. + inline void init_prototype_and_constructor_or_back_pointer( + ReadOnlyRoots roots); + // [constructor]: points back to the function or FunctionTemplateInfo // responsible for this map. // The field overlaps with the back pointer. All maps in a transition tree diff --git a/src/objects/objects-inl.h b/src/objects/objects-inl.h index 680e870981..01605903c9 100644 --- a/src/objects/objects-inl.h +++ b/src/objects/objects-inl.h @@ -796,6 +796,10 @@ void HeapObject::VerifySmiField(int offset) { #endif +ReadOnlyRoots HeapObject::EarlyGetReadOnlyRoots() const { + return ReadOnlyRoots(GetHeapFromWritableObject(*this)); +} + ReadOnlyRoots HeapObject::GetReadOnlyRoots() const { return ReadOnlyHeap::GetReadOnlyRoots(*this); } diff --git a/src/objects/objects.cc b/src/objects/objects.cc index a2c56b1caf..f0ff154e13 100644 --- a/src/objects/objects.cc +++ b/src/objects/objects.cc @@ -5872,7 +5872,7 @@ template void HashTable::Rehash(PtrComprCageBase cage_base) { DisallowGarbageCollection no_gc; WriteBarrierMode mode = GetWriteBarrierMode(no_gc); - ReadOnlyRoots roots = GetReadOnlyRoots(cage_base); + ReadOnlyRoots roots = EarlyGetReadOnlyRoots(); uint32_t capacity = Capacity(); bool done = false; for (int probe = 1; !done; probe++) { diff --git a/src/objects/string.cc b/src/objects/string.cc index 6877bce16b..867d2e30ea 100644 --- a/src/objects/string.cc +++ b/src/objects/string.cc @@ -1705,7 +1705,7 @@ uint32_t String::ComputeAndSetRawHash( DCHECK_IMPLIES(!v8_flags.shared_string_table, !HasHashCode()); // Store the hash code in the object. 
- uint64_t seed = HashSeed(GetReadOnlyRoots()); + uint64_t seed = HashSeed(EarlyGetReadOnlyRoots()); size_t start = 0; String string = *this; PtrComprCageBase cage_base = GetPtrComprCageBase(string); From b42d19ed116545e6ec0cd4b7358520746ea5c97c Mon Sep 17 00:00:00 2001 From: Toon Verwaest Date: Fri, 23 Dec 2022 14:52:22 +0100 Subject: [PATCH 062/654] [maglev] Also drop existing merges in a liveness hole It's possible that various branches merged already with a value that's in a liveness hole, but we only figure out later. If so, drop the merge as well. Bug: v8:7700, chromium:1403399 Change-Id: Ifd97e0c1959ffe51017e400fb028041047885a9c Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4111932 Auto-Submit: Toon Verwaest Reviewed-by: Victor Gomes Commit-Queue: Toon Verwaest Cr-Commit-Position: refs/heads/main@{#85013} --- src/maglev/maglev-regalloc.cc | 40 +++++++++---------- .../maglev/regress/regress-crbug-1403399.js | 36 +++++++++++++++++ 2 files changed, 56 insertions(+), 20 deletions(-) create mode 100644 test/mjsunit/maglev/regress/regress-crbug-1403399.js diff --git a/src/maglev/maglev-regalloc.cc b/src/maglev/maglev-regalloc.cc index 8e4ccc06e5..19f0cf5b68 100644 --- a/src/maglev/maglev-regalloc.cc +++ b/src/maglev/maglev-regalloc.cc @@ -1894,6 +1894,25 @@ void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control, return; } + if (node != nullptr && !node->is_loadable() && !node->has_register()) { + // If we have a node already, but can't load it here, we must be in a + // liveness hole for it, so nuke the merge state. + // This can only happen for conversion nodes, as they can split and take + // over the liveness of the node they are converting. + // TODO(v8:7700): Overeager DCHECK. 
+ // DCHECK(node->properties().is_conversion()); + if (v8_flags.trace_maglev_regalloc) { + printing_visitor_->os() << " " << reg << " - can't load " + << PrintNodeLabel(graph_labeller(), node) + << ", dropping the merge\n"; + } + // We always need to be able to restore values on JumpLoop since the value + // is definitely live at the loop header. + CHECK(!control->Is()); + state = {nullptr, initialized_node}; + return; + } + if (merge) { // The register is already occupied with a different node. Figure out // where that node is allocated on the incoming branch. @@ -1926,30 +1945,11 @@ void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control, if (v8_flags.trace_maglev_regalloc) { printing_visitor_->os() << " " << reg << " - can't load incoming " - << PrintNodeLabel(graph_labeller(), node) << ", bailing out\n"; + << PrintNodeLabel(graph_labeller(), incoming) << ", bailing out\n"; } return; } - if (node != nullptr && !node->is_loadable() && !node->has_register()) { - // If we have a node already, but can't load it here, we must be in a - // liveness hole for it, so nuke the merge state. - // This can only happen for conversion nodes, as they can split and take - // over the liveness of the node they are converting. - // TODO(v8:7700): Overeager DCHECK. - // DCHECK(node->properties().is_conversion()); - if (v8_flags.trace_maglev_regalloc) { - printing_visitor_->os() << " " << reg << " - can't load " - << PrintNodeLabel(graph_labeller(), node) - << ", dropping the merge\n"; - } - // We always need to be able to restore values on JumpLoop since the value - // is definitely live at the loop header. 
- CHECK(!control->Is()); - state = {nullptr, initialized_node}; - return; - } - const size_t size = sizeof(RegisterMerge) + predecessor_count * sizeof(compiler::AllocatedOperand); void* buffer = compilation_info_->zone()->Allocate(size); diff --git a/test/mjsunit/maglev/regress/regress-crbug-1403399.js b/test/mjsunit/maglev/regress/regress-crbug-1403399.js new file mode 100644 index 0000000000..b13387771c --- /dev/null +++ b/test/mjsunit/maglev/regress/regress-crbug-1403399.js @@ -0,0 +1,36 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --maglev --allow-natives-syntax + +function __f_0() { + for (let __v_3 = 0; __v_3 < 52; ++__v_3) { + let __v_4 = __v_3 | 0; + switch (__v_4) { + case 28: + if (__v_3 != null && typeof __v_3 == "object") { + try { + Object.defineProperty( { + get: function () { + ({get: function () { + return __v_4; + }}) + } + }); + } catch (e) {} + } + case 29: + case 31: + case 32: + case 33: + __v_4 += 1; + + case 34: + } + } +} +%PrepareFunctionForOptimization(__f_0); +__f_0(); +%OptimizeMaglevOnNextCall(__f_0); +__f_0(); From 1531fec7e62e6be6d1e498c84102b983db500ca5 Mon Sep 17 00:00:00 2001 From: Victor Gomes Date: Fri, 23 Dec 2022 15:36:09 +0100 Subject: [PATCH 063/654] [maglev] Fix reuse of property load for unstable maps Even if we have a constant load, if the map is not stable, we cannot guarantee that the load is preserved across side-effecting calls. 
Fixed: chromium:1403324 Bug: v8:7700 Change-Id: Ib900cf7574711115439e6521ed8cfaa866525e78 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4123532 Reviewed-by: Toon Verwaest Commit-Queue: Victor Gomes Auto-Submit: Victor Gomes Commit-Queue: Toon Verwaest Cr-Commit-Position: refs/heads/main@{#85014} --- src/maglev/maglev-graph-builder.cc | 30 ++++++++++++++++++++------ src/maglev/maglev-graph-builder.h | 2 +- test/mjsunit/maglev/regress-1403324.js | 29 +++++++++++++++++++++++++ 3 files changed, 54 insertions(+), 7 deletions(-) create mode 100644 test/mjsunit/maglev/regress-1403324.js diff --git a/src/maglev/maglev-graph-builder.cc b/src/maglev/maglev-graph-builder.cc index d6384c853a..224dfa80ef 100644 --- a/src/maglev/maglev-graph-builder.cc +++ b/src/maglev/maglev-graph-builder.cc @@ -1938,7 +1938,7 @@ bool MaglevGraphBuilder::TryBuildPropertyLoad( SetAccumulator(BuildLoadField(access_info, lookup_start_object)); RecordKnownProperty(lookup_start_object, name, current_interpreter_frame_.accumulator(), - access_info.IsFastDataConstant()); + access_info); return true; case compiler::PropertyAccessInfo::kDictionaryProtoDataConstant: return TryFoldLoadDictPrototypeConstant(access_info); @@ -1955,7 +1955,8 @@ bool MaglevGraphBuilder::TryBuildPropertyLoad( DCHECK_EQ(receiver, lookup_start_object); SetAccumulator(AddNewNode({receiver})); RecordKnownProperty(lookup_start_object, name, - current_interpreter_frame_.accumulator(), true); + current_interpreter_frame_.accumulator(), + access_info); return true; } } @@ -1978,7 +1979,7 @@ bool MaglevGraphBuilder::TryBuildPropertyStore( if (TryBuildStoreField(access_info, receiver, access_mode)) { RecordKnownProperty(receiver, name, current_interpreter_frame_.accumulator(), - access_info.IsFastDataConstant()); + access_info); return true; } return false; @@ -2295,9 +2296,26 @@ bool MaglevGraphBuilder::TryBuildElementAccess( } } -void MaglevGraphBuilder::RecordKnownProperty(ValueNode* lookup_start_object, - 
compiler::NameRef name, - ValueNode* value, bool is_const) { +void MaglevGraphBuilder::RecordKnownProperty( + ValueNode* lookup_start_object, compiler::NameRef name, ValueNode* value, + compiler::PropertyAccessInfo const& access_info) { + bool is_const; + if (access_info.IsFastDataConstant() || access_info.IsStringLength()) { + is_const = true; + // Even if we have a constant load, if the map is not stable, we cannot + // guarantee that the load is preserved across side-effecting calls. + // TODO(v8:7700): It might be possible to track it as const if we know that + // we're still on the main transition tree; and if we add a dependency on + // the stable end-maps of the entire tree. + for (auto& map : access_info.lookup_start_object_maps()) { + if (!map.is_stable()) { + is_const = false; + break; + } + } + } else { + is_const = false; + } auto& loaded_properties = is_const ? known_node_aspects().loaded_constant_properties : known_node_aspects().loaded_properties; diff --git a/src/maglev/maglev-graph-builder.h b/src/maglev/maglev-graph-builder.h index acbfa59080..7410665822 100644 --- a/src/maglev/maglev-graph-builder.h +++ b/src/maglev/maglev-graph-builder.h @@ -1309,7 +1309,7 @@ class MaglevGraphBuilder { // subsequent loads. void RecordKnownProperty(ValueNode* lookup_start_object, compiler::NameRef name, ValueNode* value, - bool is_const); + compiler::PropertyAccessInfo const& access_info); bool TryReuseKnownPropertyLoad(ValueNode* lookup_start_object, compiler::NameRef name); diff --git a/test/mjsunit/maglev/regress-1403324.js b/test/mjsunit/maglev/regress-1403324.js new file mode 100644 index 0000000000..51d6630f19 --- /dev/null +++ b/test/mjsunit/maglev/regress-1403324.js @@ -0,0 +1,29 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +// Flags: --allow-natives-syntax --maglev + +function foo(__v_4) { + var __v_5 = function () { + return __v_4; + }(); + var __v_6 = __v_5.x; + arguments[42]; + return __v_6 + __v_5.x; +} +var __v_0 = {x: 24}; +__v_0.g = 43; + +%PrepareFunctionForOptimization(foo); +foo({x: 42}); +foo({x: 42}); + +%OptimizeMaglevOnNextCall(foo); +var __v_3 = {x: 42}; +Object.prototype.__defineGetter__(42, function () { + __v_3.__defineGetter__("x", function () { + }); +}); + +assertEquals(NaN, foo(__v_3)); From 109c8efc55ad02fc8967f5d4c6a8ed4513658305 Mon Sep 17 00:00:00 2001 From: Toon Verwaest Date: Fri, 23 Dec 2022 15:57:56 +0100 Subject: [PATCH 064/654] [maglev] Fix NaN handling after Ucomisd As a drive-by this also fixes property load from smi. We still need to check that we actually have a smi... Bug: v8:7700, chromium:1403280, chromium:1403323 Change-Id: I3c4f050b94550b8d7e4e65f733f9c1dad47941d2 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4120575 Auto-Submit: Toon Verwaest Commit-Queue: Victor Gomes Commit-Queue: Patrick Thier Reviewed-by: Victor Gomes Reviewed-by: Patrick Thier Cr-Commit-Position: refs/heads/main@{#85015} --- src/maglev/arm64/maglev-ir-arm64.cc | 33 +++++++--------- src/maglev/maglev-assembler.h | 20 +++++----- src/maglev/x64/maglev-ir-x64.cc | 38 +++++++++---------- .../maglev/regress/regress-crbug-1403280.js | 14 +++++++ .../maglev/regress/regress-crbug-1403323.js | 15 ++++++++ 5 files changed, 72 insertions(+), 48 deletions(-) create mode 100644 test/mjsunit/maglev/regress/regress-crbug-1403280.js create mode 100644 test/mjsunit/maglev/regress/regress-crbug-1403323.js diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index 1817783108..ac0797dd33 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -75,9 +75,9 @@ void Int32NegateWithOverflow::GenerateCode(MaglevAssembler* masm, // Deopt when result would be -0. 
static_assert(Int32NegateWithOverflow::kProperties.can_eager_deopt()); - __ RegisterEagerDeopt(eager_deopt_info(), DeoptimizeReason::kOverflow); + Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow); __ RecordComment("-- Jump to eager deopt"); - __ Cbz(value, eager_deopt_info()->deopt_entry_label()); + __ Cbz(value, fail); __ negs(out, value); // Output register must not be a register input into the eager deopt info. @@ -242,12 +242,10 @@ void CheckedUint32ToInt32::GenerateCode(MaglevAssembler* masm, Register input_reg = ToRegister(input()).W(); // Check if the top bit is set -- if it is, then this is not a valid int32, // otherwise it is. - // TODO(victorgomes): I am manually creating an eager deopt here, if this is a - // common pattern, maybe abstract to a helper function. static_assert(CheckedUint32ToInt32::kProperties.can_eager_deopt()); - __ RegisterEagerDeopt(eager_deopt_info(), DeoptimizeReason::kNotInt32); + Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kNotInt32); __ RecordComment("-- Jump to eager deopt"); - __ Tbnz(input_reg, 31, eager_deopt_info()->deopt_entry_label()); + __ Tbnz(input_reg, 31, fail); } void ChangeInt32ToFloat64::SetValueLocationConstraints() { @@ -372,8 +370,7 @@ void CheckMaps::GenerateCode(MaglevAssembler* masm, // TODO(victorgomes): This can happen, because we do not emit an unconditional // deopt when we intersect the map sets. if (maps().is_empty()) { - __ RegisterEagerDeopt(eager_deopt_info(), DeoptimizeReason::kWrongMap); - __ B(eager_deopt_info()->deopt_entry_label()); + __ EmitEagerDeopt(this, DeoptimizeReason::kWrongMap); return; } @@ -386,7 +383,7 @@ void CheckMaps::GenerateCode(MaglevAssembler* masm, Condition is_smi = __ CheckSmi(object); if (maps_include_heap_number) { // Smis count as matching the HeapNumber map, so we're done. 
- __ B(&done); + __ B(&done, is_smi); } else { __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kWrongMap, this); } @@ -420,12 +417,10 @@ void CheckMapsWithMigration::SetValueLocationConstraints() { } void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { - __ RegisterEagerDeopt(eager_deopt_info(), DeoptimizeReason::kWrongMap); - // TODO(victorgomes): This can happen, because we do not emit an unconditional // deopt when we intersect the map sets. if (maps().is_empty()) { - __ B(eager_deopt_info()->deopt_entry_label()); + __ EmitEagerDeopt(this, DeoptimizeReason::kWrongMap); return; } @@ -440,9 +435,9 @@ void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm, Condition is_smi = __ CheckSmi(object); if (maps_include_heap_number) { // Smis count as matching the HeapNumber map, so we're done. - __ B(*done); + __ B(*done, is_smi); } else { - __ B(eager_deopt_info()->deopt_entry_label(), is_smi); + __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kWrongMap, this); } } @@ -518,14 +513,14 @@ void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm, }, // If this is the last map to check, we should deopt if we fail. // This is safe to do, since {eager_deopt_info} is ZoneAllocated. - (last_map ? ZoneLabelRef::UnsafeFromLabelPointer( - eager_deopt_info()->deopt_entry_label()) + (last_map ? ZoneLabelRef::UnsafeFromLabelPointer(masm->GetDeoptLabel( + this, DeoptimizeReason::kWrongMap)) : continue_label), done, object, object_map, i, this); } else if (last_map) { // If it is the last map and it is not a migration target, we should deopt // if the check fails. - __ B(eager_deopt_info()->deopt_entry_label(), ne); + __ EmitDeoptIf(ne, DeoptimizeReason::kWrongMap, this); } if (!last_map) { @@ -1277,10 +1272,10 @@ void CheckedInternalizedString::GenerateCode(MaglevAssembler* masm, static_assert(kThinStringTagBit > 0); // Deopt if this isn't a string. 
__ Tst(instance_type, Immediate(kIsNotStringMask)); - __ JumpIf(ne, deopt_info->deopt_entry_label()); + __ EmitEagerDeoptIf(ne, DeoptimizeReason::kWrongMap, node); // Deopt if this isn't a thin string. __ Tst(instance_type, Immediate(kThinStringTagBit)); - __ JumpIf(eq, deopt_info->deopt_entry_label()); + __ EmitEagerDeoptIf(eq, DeoptimizeReason::kWrongMap, node); __ LoadTaggedPointerField( object, FieldMemOperand(object, ThinString::kActualOffset)); if (v8_flags.debug_code) { diff --git a/src/maglev/maglev-assembler.h b/src/maglev/maglev-assembler.h index 7d268dd352..ae652b38f8 100644 --- a/src/maglev/maglev-assembler.h +++ b/src/maglev/maglev-assembler.h @@ -124,8 +124,8 @@ class MaglevAssembler : public MacroAssembler { inline void JumpToDeferredIf(Condition cond, Function&& deferred_code_gen, Args&&... args); - inline void RegisterEagerDeopt(EagerDeoptInfo* deopt_info, - DeoptimizeReason reason); + template + inline Label* GetDeoptLabel(NodeT* node, DeoptimizeReason reason); template inline void EmitEagerDeopt(NodeT* node, DeoptimizeReason reason); template @@ -456,8 +456,11 @@ inline void MaglevAssembler::JumpToDeferredIf(Condition cond, // Deopt // --- -inline void MaglevAssembler::RegisterEagerDeopt(EagerDeoptInfo* deopt_info, - DeoptimizeReason reason) { +template +inline Label* MaglevAssembler::GetDeoptLabel(NodeT* node, + DeoptimizeReason reason) { + static_assert(NodeT::kProperties.can_eager_deopt()); + EagerDeoptInfo* deopt_info = node->eager_deopt_info(); if (deopt_info->reason() != DeoptimizeReason::kUnknown) { DCHECK_EQ(deopt_info->reason(), reason); } @@ -465,25 +468,22 @@ inline void MaglevAssembler::RegisterEagerDeopt(EagerDeoptInfo* deopt_info, code_gen_state()->PushEagerDeopt(deopt_info); deopt_info->set_reason(reason); } + return node->eager_deopt_info()->deopt_entry_label(); } template inline void MaglevAssembler::EmitEagerDeopt(NodeT* node, DeoptimizeReason reason) { - static_assert(NodeT::kProperties.can_eager_deopt()); - 
RegisterEagerDeopt(node->eager_deopt_info(), reason); RecordComment("-- Jump to eager deopt"); - Jump(node->eager_deopt_info()->deopt_entry_label()); + Jump(GetDeoptLabel(node, reason)); } template inline void MaglevAssembler::EmitEagerDeoptIf(Condition cond, DeoptimizeReason reason, NodeT* node) { - static_assert(NodeT::kProperties.can_eager_deopt()); - RegisterEagerDeopt(node->eager_deopt_info(), reason); RecordComment("-- Jump to eager deopt"); - JumpIf(cond, node->eager_deopt_info()->deopt_entry_label()); + JumpIf(cond, GetDeoptLabel(node, reason)); } inline void MaglevAssembler::DefineLazyDeoptPoint(LazyDeoptInfo* info) { diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index 2bd00dbdd7..7ee3172725 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -230,8 +230,7 @@ void CheckMaps::GenerateCode(MaglevAssembler* masm, // TODO(victorgomes): This can happen, because we do not emit an unconditional // deopt when we intersect the map sets. if (maps().is_empty()) { - __ RegisterEagerDeopt(eager_deopt_info(), DeoptimizeReason::kWrongMap); - __ jmp(eager_deopt_info()->deopt_entry_label()); + __ EmitEagerDeopt(this, DeoptimizeReason::kWrongMap); return; } @@ -244,7 +243,7 @@ void CheckMaps::GenerateCode(MaglevAssembler* masm, Condition is_smi = __ CheckSmi(object); if (maps_include_heap_number) { // Smis count as matching the HeapNumber map, so we're done. - __ jmp(&done); + __ j(is_smi, &done); } else { __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kWrongMap, this); } @@ -346,12 +345,10 @@ void CheckMapsWithMigration::SetValueLocationConstraints() { } void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { - __ RegisterEagerDeopt(eager_deopt_info(), DeoptimizeReason::kWrongMap); - // TODO(victorgomes): This can happen, because we do not emit an unconditional // deopt when we intersect the map sets. 
if (maps().is_empty()) { - __ jmp(eager_deopt_info()->deopt_entry_label()); + __ EmitEagerDeopt(this, DeoptimizeReason::kWrongMap); return; } @@ -366,9 +363,9 @@ void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm, Condition is_smi = __ CheckSmi(object); if (maps_include_heap_number) { // Smis count as matching the HeapNumber map, so we're done. - __ jmp(*done); + __ j(is_smi, *done); } else { - __ j(is_smi, eager_deopt_info()->deopt_entry_label()); + __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kWrongMap, this); } } @@ -439,14 +436,14 @@ void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm, }, // If this is the last map to check, we should deopt if we fail. // This is safe to do, since {eager_deopt_info} is ZoneAllocated. - (last_map ? ZoneLabelRef::UnsafeFromLabelPointer( - eager_deopt_info()->deopt_entry_label()) + (last_map ? ZoneLabelRef::UnsafeFromLabelPointer(masm->GetDeoptLabel( + this, DeoptimizeReason::kWrongMap)) : continue_label), done, object, i, this); } else if (last_map) { // If it is the last map and it is not a migration target, we should deopt // if the check fails. - __ j(not_equal, eager_deopt_info()->deopt_entry_label()); + __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kWrongMap, this); } if (!last_map) { @@ -623,10 +620,10 @@ void CheckedInternalizedString::GenerateCode(MaglevAssembler* masm, static_assert(kThinStringTagBit > 0); // Deopt if this isn't a string. __ testw(map_tmp, Immediate(kIsNotStringMask)); - __ j(not_zero, deopt_info->deopt_entry_label()); + __ EmitEagerDeoptIf(not_zero, DeoptimizeReason::kWrongMap, node); // Deopt if this isn't a thin string. 
__ testb(map_tmp, Immediate(kThinStringTagBit)); - __ j(zero, deopt_info->deopt_entry_label()); + __ EmitEagerDeoptIf(zero, DeoptimizeReason::kWrongMap, node); __ LoadTaggedPointerField( object, FieldOperand(object, ThinString::kActualOffset)); if (v8_flags.debug_code) { @@ -668,7 +665,7 @@ void CheckedObjectToIndex::GenerateCode(MaglevAssembler* masm, FIRST_STRING_TYPE, LAST_STRING_TYPE); __ j(below_equal, &is_string); - __ cmpl(kScratchRegister, Immediate(HEAP_NUMBER_TYPE)); + __ cmpw(kScratchRegister, Immediate(HEAP_NUMBER_TYPE)); // The IC will go generic if it encounters something other than a // Number or String key. __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotInt32, node); @@ -677,6 +674,9 @@ void CheckedObjectToIndex::GenerateCode(MaglevAssembler* masm, { DoubleRegister number_value = node->double_temporaries().first(); DoubleRegister converted_back = kScratchDoubleReg; + // Load the heap number value into a double register. + __ Movsd(number_value, + FieldOperand(object, HeapNumber::kValueOffset)); // Convert the input float64 value to int32. __ Cvttsd2si(result_reg, number_value); // Convert that int32 value back to float64. @@ -684,6 +684,7 @@ void CheckedObjectToIndex::GenerateCode(MaglevAssembler* masm, // Check that the result of the float64->int32->float64 is equal to // the input (i.e. that the conversion didn't truncate. __ Ucomisd(number_value, converted_back); + __ EmitEagerDeoptIf(parity_even, DeoptimizeReason::kNotInt32, node); __ j(equal, *done); __ EmitEagerDeopt(node, DeoptimizeReason::kNotInt32); } @@ -2425,6 +2426,7 @@ void CheckedTruncateFloat64ToInt32::GenerateCode(MaglevAssembler* masm, // Check that the result of the float64->int32->float64 is equal to the input // (i.e. that the conversion didn't truncate. __ Ucomisd(input_reg, converted_back); + __ EmitEagerDeoptIf(parity_even, DeoptimizeReason::kNotInt32, this); __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotInt32, this); // Check if {input} is -0. 
@@ -2451,14 +2453,15 @@ void CheckedTruncateFloat64ToUint32::GenerateCode( Register result_reg = ToRegister(result()); DoubleRegister converted_back = kScratchDoubleReg; - Label fail; // Convert the input float64 value to uint32. - __ Cvttsd2ui(result_reg, input_reg, &fail); + Label* deopt = __ GetDeoptLabel(this, DeoptimizeReason::kNotUint32); + __ Cvttsd2ui(result_reg, input_reg, deopt); // Convert that uint32 value back to float64. __ Cvtlui2sd(converted_back, result_reg); // Check that the result of the float64->uint32->float64 is equal to the input // (i.e. that the conversion didn't truncate. __ Ucomisd(input_reg, converted_back); + __ EmitEagerDeoptIf(parity_even, DeoptimizeReason::kNotUint32, this); __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotUint32, this); // Check if {input} is -0. @@ -2472,9 +2475,6 @@ void CheckedTruncateFloat64ToUint32::GenerateCode( __ cmpl(high_word32_of_input, Immediate(0)); __ EmitEagerDeoptIf(less, DeoptimizeReason::kNotUint32, this); - __ bind(&fail); - __ EmitEagerDeopt(this, DeoptimizeReason::kNotUint32); - __ bind(&check_done); } diff --git a/test/mjsunit/maglev/regress/regress-crbug-1403280.js b/test/mjsunit/maglev/regress/regress-crbug-1403280.js new file mode 100644 index 0000000000..e6f1b97bed --- /dev/null +++ b/test/mjsunit/maglev/regress/regress-crbug-1403280.js @@ -0,0 +1,14 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+// +// Flags: --maglev --allow-natives-syntax + +function __f_41() { + return "abc".charCodeAt(undefined/2); +} + +%PrepareFunctionForOptimization(__f_41); +assertEquals(97, __f_41()); +%OptimizeMaglevOnNextCall(__f_41); +assertEquals(97, __f_41()); diff --git a/test/mjsunit/maglev/regress/regress-crbug-1403323.js b/test/mjsunit/maglev/regress/regress-crbug-1403323.js new file mode 100644 index 0000000000..cc18697092 --- /dev/null +++ b/test/mjsunit/maglev/regress/regress-crbug-1403323.js @@ -0,0 +1,15 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// Flags: --maglev --allow-natives-syntax + +function foo(a) { + if (a.length > 0) {} +} +%PrepareFunctionForOptimization(foo); +foo(false); +foo(false); +foo(4); +%OptimizeMaglevOnNextCall(foo); +assertThrows(foo); From 3386dda5d5654db1cecc7b520a819055d7acce0b Mon Sep 17 00:00:00 2001 From: Milad Fa Date: Fri, 23 Dec 2022 13:52:58 -0500 Subject: [PATCH 065/654] PPC[liftoff]: Implement simd FP trunc saturate Change-Id: I71a0d5e630bf886282989cb314ce2adb967ebaee Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4110813 Commit-Queue: Milad Farazmand Reviewed-by: Vasili Skurydzin Cr-Commit-Position: refs/heads/main@{#85016} --- src/codegen/ppc/macro-assembler-ppc.cc | 25 +++++++++++++++++ src/codegen/ppc/macro-assembler-ppc.h | 2 ++ .../backend/ppc/code-generator-ppc.cc | 28 ++----------------- src/wasm/baseline/ppc/liftoff-assembler-ppc.h | 28 +++++++------------ 4 files changed, 39 insertions(+), 44 deletions(-) diff --git a/src/codegen/ppc/macro-assembler-ppc.cc b/src/codegen/ppc/macro-assembler-ppc.cc index 15441e7db4..f1a1d535bc 100644 --- a/src/codegen/ppc/macro-assembler-ppc.cc +++ b/src/codegen/ppc/macro-assembler-ppc.cc @@ -4592,6 +4592,31 @@ void TurboAssembler::F32x4DemoteF64x2Zero(Simd128Register dst, vinsertd(dst, scratch, Operand(lane_number)); } +void 
TurboAssembler::I32x4TruncSatF64x2SZero(Simd128Register dst, + Simd128Register src, + Simd128Register scratch) { + constexpr int lane_number = 8; + // NaN to 0. + xvcmpeqdp(scratch, src, src); + vand(scratch, src, scratch); + xvcvdpsxws(scratch, scratch); + vextractuw(dst, scratch, Operand(lane_number)); + vinsertw(scratch, dst, Operand(4)); + vxor(dst, dst, dst); + vinsertd(dst, scratch, Operand(lane_number)); +} + +void TurboAssembler::I32x4TruncSatF64x2UZero(Simd128Register dst, + Simd128Register src, + Simd128Register scratch) { + constexpr int lane_number = 8; + xvcvdpuxws(scratch, src); + vextractuw(dst, scratch, Operand(lane_number)); + vinsertw(scratch, dst, Operand(4)); + vxor(dst, dst, dst); + vinsertd(dst, scratch, Operand(lane_number)); +} + void TurboAssembler::V128AnyTrue(Register dst, Simd128Register src, Register scratch1, Register scratch2, Simd128Register scratch3) { diff --git a/src/codegen/ppc/macro-assembler-ppc.h b/src/codegen/ppc/macro-assembler-ppc.h index 14fff32563..79adea4c16 100644 --- a/src/codegen/ppc/macro-assembler-ppc.h +++ b/src/codegen/ppc/macro-assembler-ppc.h @@ -1271,6 +1271,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { V(I64x2Abs) \ V(I32x4Abs) \ V(I32x4SConvertF32x4) \ + V(I32x4TruncSatF64x2SZero) \ + V(I32x4TruncSatF64x2UZero) \ V(I16x8Abs) \ V(I16x8Neg) \ V(I8x16Abs) \ diff --git a/src/compiler/backend/ppc/code-generator-ppc.cc b/src/compiler/backend/ppc/code-generator-ppc.cc index f634d016eb..97ad39c4f7 100644 --- a/src/compiler/backend/ppc/code-generator-ppc.cc +++ b/src/compiler/backend/ppc/code-generator-ppc.cc @@ -2392,6 +2392,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( V(I64x2Abs) \ V(I32x4Abs) \ V(I32x4SConvertF32x4) \ + V(I32x4TruncSatF64x2SZero) \ + V(I32x4TruncSatF64x2UZero) \ V(I16x8Abs) \ V(I16x8Neg) \ V(I8x16Abs) \ @@ -2895,32 +2897,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } #undef MAYBE_REVERSE_BYTES - case 
kPPC_I32x4TruncSatF64x2SZero: { - constexpr int lane_number = 8; - Simd128Register src = i.InputSimd128Register(0); - Simd128Register dst = i.OutputSimd128Register(); - // NaN to 0. - __ vor(kScratchSimd128Reg, src, src); - __ xvcmpeqdp(kScratchSimd128Reg, kScratchSimd128Reg, kScratchSimd128Reg); - __ vand(kScratchSimd128Reg, src, kScratchSimd128Reg); - __ xvcvdpsxws(kScratchSimd128Reg, kScratchSimd128Reg); - __ vextractuw(dst, kScratchSimd128Reg, Operand(lane_number)); - __ vinsertw(kScratchSimd128Reg, dst, Operand(4)); - __ vxor(dst, dst, dst); - __ vinsertd(dst, kScratchSimd128Reg, Operand(lane_number)); - break; - } - case kPPC_I32x4TruncSatF64x2UZero: { - constexpr int lane_number = 8; - Simd128Register src = i.InputSimd128Register(0); - Simd128Register dst = i.OutputSimd128Register(); - __ xvcvdpuxws(kScratchSimd128Reg, src); - __ vextractuw(dst, kScratchSimd128Reg, Operand(lane_number)); - __ vinsertw(kScratchSimd128Reg, dst, Operand(4)); - __ vxor(dst, dst, dst); - __ vinsertd(dst, kScratchSimd128Reg, Operand(lane_number)); - break; - } case kPPC_StoreCompressTagged: { size_t index = 0; AddressingMode mode = kMode_None; diff --git a/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/src/wasm/baseline/ppc/liftoff-assembler-ppc.h index 8a16ac0142..c18aae82ce 100644 --- a/src/wasm/baseline/ppc/liftoff-assembler-ppc.h +++ b/src/wasm/baseline/ppc/liftoff-assembler-ppc.h @@ -2007,14 +2007,16 @@ SIMD_UNOP_LIST(EMIT_SIMD_UNOP) #undef EMIT_SIMD_UNOP #undef SIMD_UNOP_LIST -#define SIMD_UNOP_WITH_SCRATCH_LIST(V) \ - V(f32x4_demote_f64x2_zero, F32x4DemoteF64x2Zero, , void) \ - V(i64x2_abs, I64x2Abs, , void) \ - V(i32x4_abs, I32x4Abs, , void) \ - V(i32x4_sconvert_f32x4, I32x4SConvertF32x4, , void) \ - V(i16x8_abs, I16x8Abs, , void) \ - V(i16x8_neg, I16x8Neg, , void) \ - V(i8x16_abs, I8x16Abs, , void) \ +#define SIMD_UNOP_WITH_SCRATCH_LIST(V) \ + V(f32x4_demote_f64x2_zero, F32x4DemoteF64x2Zero, , void) \ + V(i64x2_abs, I64x2Abs, , void) \ + V(i32x4_abs, I32x4Abs, , void) \ 
+ V(i32x4_sconvert_f32x4, I32x4SConvertF32x4, , void) \ + V(i32x4_trunc_sat_f64x2_s_zero, I32x4TruncSatF64x2SZero, , void) \ + V(i32x4_trunc_sat_f64x2_u_zero, I32x4TruncSatF64x2UZero, , void) \ + V(i16x8_abs, I16x8Abs, , void) \ + V(i16x8_neg, I16x8Neg, , void) \ + V(i8x16_abs, I8x16Abs, , void) \ V(i8x16_neg, I8x16Neg, , void) #define EMIT_SIMD_UNOP_WITH_SCRATCH(name, op, return_val, return_type) \ @@ -2458,16 +2460,6 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst, kScratchSimd128Reg); } -void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "i32x4.trunc_sat_f64x2_s_zero"); -} - -void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "i32x4.trunc_sat_f64x2_u_zero"); -} - void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) { LoadU64(limit_address, MemOperand(limit_address), r0); CmpU64(sp, limit_address); From 692503619570838e251c3a241057a07cb9e32c6b Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Fri, 23 Dec 2022 19:27:44 -0800 Subject: [PATCH 066/654] Update V8 DEPS (trusted) Rolling v8/build: https://chromium.googlesource.com/chromium/src/build/+log/ff6be8b..6025acd Rolling v8/third_party/catapult: https://chromium.googlesource.com/catapult/+log/ebbb83f..1665385 Rolling v8/third_party/fuchsia-sdk/sdk: version:11.20221222.1.1..version:11.20221223.1.1 Change-Id: I14ec522778eed23878e2a921893bc6ef263f083d Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4120769 Commit-Queue: v8-ci-autoroll-builder Bot-Commit: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#85017} --- DEPS | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/DEPS b/DEPS index eea14ae953..31c18028f3 100644 --- a/DEPS +++ b/DEPS @@ -65,7 +65,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and 
whatever else without interference from each other. - 'fuchsia_version': 'version:11.20221222.1.1', + 'fuchsia_version': 'version:11.20221223.1.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -105,7 +105,7 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + 'ff6be8b34de4264c1b963a344ef2ef6ce6b8ef49', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '6025acd439ca7cde73ad6f93977d9806565a6c2f', 'buildtools': Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '134af4c91bb9ab46fe1165ff1cf0f76900fa5a7e', 'buildtools/clang_format/script': @@ -209,7 +209,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + 'ebbb83f192fe9ee3214119184001b9ddcfd44fb0', + 'url': Var('chromium_url') + '/catapult.git' + '@' + '1665385c152d22abd9fc0dad1d685dc316700d8b', 'condition': 'checkout_android', }, 'third_party/colorama/src': { From eef61dadf467b4c36c93d189319c6a246473976d Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Sat, 24 Dec 2022 19:11:51 -0800 Subject: [PATCH 067/654] Update V8 DEPS (trusted) Rolling v8/build: https://chromium.googlesource.com/chromium/src/build/+log/6025acd..6a408e0 Rolling v8/third_party/catapult: https://chromium.googlesource.com/catapult/+log/1665385..2786327 Rolling v8/third_party/fuchsia-sdk/sdk: version:11.20221223.1.1..version:11.20221224.3.1 Change-Id: I49f6c8b0773adbbd7f2b6b3a0aeb25349dfff55b Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4124556 Bot-Commit: v8-ci-autoroll-builder Commit-Queue: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#85018} --- DEPS | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/DEPS b/DEPS index 31c18028f3..635c4df534 100644 --- 
a/DEPS +++ b/DEPS @@ -65,7 +65,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. - 'fuchsia_version': 'version:11.20221223.1.1', + 'fuchsia_version': 'version:11.20221224.3.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -105,7 +105,7 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '6025acd439ca7cde73ad6f93977d9806565a6c2f', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '6a408e023debeec933a09aa68a18916dd3fc3bf4', 'buildtools': Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '134af4c91bb9ab46fe1165ff1cf0f76900fa5a7e', 'buildtools/clang_format/script': @@ -209,7 +209,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + '1665385c152d22abd9fc0dad1d685dc316700d8b', + 'url': Var('chromium_url') + '/catapult.git' + '@' + '27863274a2f25b92973c10f29b3c4e476fd01a34', 'condition': 'checkout_android', }, 'third_party/colorama/src': { From 8e063b80663b419ff8a984ff96aa61ab78b7dc37 Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Sun, 25 Dec 2022 19:26:41 -0800 Subject: [PATCH 068/654] Update V8 DEPS (trusted) Rolling v8/build: https://chromium.googlesource.com/chromium/src/build/+log/6a408e0..e3bb433 Rolling v8/third_party/fuchsia-sdk/sdk: version:11.20221224.3.1..version:11.20221225.2.1 Change-Id: Idbe82931e04d3b8c296dadc9d2d08c0324e6b066 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4124567 Bot-Commit: v8-ci-autoroll-builder Commit-Queue: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#85019} --- DEPS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/DEPS 
b/DEPS index 635c4df534..f1c279855e 100644 --- a/DEPS +++ b/DEPS @@ -65,7 +65,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. - 'fuchsia_version': 'version:11.20221224.3.1', + 'fuchsia_version': 'version:11.20221225.2.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -105,7 +105,7 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '6a408e023debeec933a09aa68a18916dd3fc3bf4', + Var('chromium_url') + '/chromium/src/build.git' + '@' + 'e3bb433e5e322a10218ea84b0fb8e06c338a1fe5', 'buildtools': Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '134af4c91bb9ab46fe1165ff1cf0f76900fa5a7e', 'buildtools/clang_format/script': From cdcb3caf2deacecfe4c4fbd70ce78fdb96b3c208 Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Mon, 26 Dec 2022 19:09:41 -0800 Subject: [PATCH 069/654] Update V8 DEPS (trusted) Rolling v8/build: https://chromium.googlesource.com/chromium/src/build/+log/e3bb433..4e33131 Rolling v8/third_party/fuchsia-sdk/sdk: version:11.20221225.2.1..version:11.20221226.2.1 Change-Id: I8ff17f992af9cf8a883f6ccec6d067f0fbef07a7 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4124570 Commit-Queue: v8-ci-autoroll-builder Bot-Commit: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#85020} --- DEPS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/DEPS b/DEPS index f1c279855e..3054ca573b 100644 --- a/DEPS +++ b/DEPS @@ -65,7 +65,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. 
- 'fuchsia_version': 'version:11.20221225.2.1', + 'fuchsia_version': 'version:11.20221226.2.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -105,7 +105,7 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + 'e3bb433e5e322a10218ea84b0fb8e06c338a1fe5', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '4e331315b661d1a4f130e23178f82e9898ee62b4', 'buildtools': Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '134af4c91bb9ab46fe1165ff1cf0f76900fa5a7e', 'buildtools/clang_format/script': From a91587d38097981536161e8602d5fbba41467e88 Mon Sep 17 00:00:00 2001 From: Toon Verwaest Date: Fri, 23 Dec 2022 22:10:33 +0100 Subject: [PATCH 070/654] [maglev] Fix Int32ModulusWithOverflow Mark rhs as clobbered since we may negate it. Negate the lhs in rax. 
Bug: v8:7700, chromium:1403470 Change-Id: I9a26de78fcd8d1db90c1d26617001c0c699c350e Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4110814 Auto-Submit: Toon Verwaest Reviewed-by: Jakob Linke Commit-Queue: Jakob Linke Cr-Commit-Position: refs/heads/main@{#85021} --- src/maglev/x64/maglev-ir-x64.cc | 4 ++-- .../maglev/regress/regress-crbug-1403470.js | 14 ++++++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 test/mjsunit/maglev/regress/regress-crbug-1403470.js diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index 7ee3172725..4781ba85d5 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -1423,7 +1423,7 @@ void Int32MultiplyWithOverflow::GenerateCode(MaglevAssembler* masm, void Int32ModulusWithOverflow::SetValueLocationConstraints() { UseRegister(left_input()); - UseRegister(right_input()); + UseAndClobberRegister(right_input()); DefineAsFixed(this, rdx); // rax,rdx are clobbered by div. RequireSpecificTemporary(rax); @@ -1473,8 +1473,8 @@ void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm, less, [](MaglevAssembler* masm, ZoneLabelRef done, Register left, Register right, Int32ModulusWithOverflow* node) { - __ negl(left); __ movl(rax, left); + __ negl(rax); __ xorl(rdx, rdx); __ divl(right); __ testl(rdx, rdx); diff --git a/test/mjsunit/maglev/regress/regress-crbug-1403470.js b/test/mjsunit/maglev/regress/regress-crbug-1403470.js new file mode 100644 index 0000000000..65ac15c584 --- /dev/null +++ b/test/mjsunit/maglev/regress/regress-crbug-1403470.js @@ -0,0 +1,14 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+// +// Flags: --maglev --allow-natives-syntax + +function __f_0( __v_18) { + -1 % 11; + return -1 % 11; +} +%PrepareFunctionForOptimization(__f_0); +assertEquals(-1, __f_0()); +%OptimizeMaglevOnNextCall(__f_0); +assertEquals(-1, __f_0()); From 2b976c1c3f5300432c0ec85783f630b3b0fff528 Mon Sep 17 00:00:00 2001 From: Jakob Linke Date: Tue, 27 Dec 2022 12:12:46 +0100 Subject: [PATCH 071/654] [maglev] Fix an outdated fallthrough in AttemptOnStackReplacement .. that should now be a jump to `no_code_for_osr` since AttemptOnStackReplacement is now emitted in deferred code and may thus no longer fall through. Fixed: chromium:1403135 Bug: v8:7700 Change-Id: I3dcd7696dc5a19a0cd955b2eef1538c07b2d6e00 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4124200 Auto-Submit: Jakob Linke Commit-Queue: Jakob Linke Commit-Queue: Darius Mercadier Reviewed-by: Darius Mercadier Cr-Commit-Position: refs/heads/main@{#85022} --- src/maglev/x64/maglev-ir-x64.cc | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index 4781ba85d5..6781885f47 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -2688,8 +2688,8 @@ void AttemptOnStackReplacement(MaglevAssembler* masm, __ Move(maybe_target_code, kReturnRegister0); } - // A `0` return value means there is no OSR code available yet. Fall - // through for now, OSR code will be picked up once it exists and is + // A `0` return value means there is no OSR code available yet. Continue + // execution in Maglev, OSR code will be picked up once it exists and is // cached on the feedback vector. __ Cmp(maybe_target_code, 0); __ j(equal, *no_code_for_osr, Label::kNear); @@ -2704,10 +2704,11 @@ void AttemptOnStackReplacement(MaglevAssembler* masm, GetGeneralRegistersUsedAsInputs(node->eager_deopt_info())); __ EmitEagerDeopt(node, DeoptimizeReason::kPrepareForOnStackReplacement); } else { - // Fall through. 
With TF disabled we cannot OSR and thus it doesn't make - // sense to start the process. We do still perform all remaining - // bookkeeping above though, to keep Maglev code behavior roughly the same - // in both configurations. + // Continue execution in Maglev. With TF disabled we cannot OSR and thus it + // doesn't make sense to start the process. We do still perform all + // remaining bookkeeping above though, to keep Maglev code behavior roughly + // the same in both configurations. + __ jmp(*no_code_for_osr, Label::kNear); } } From a788519ab7c36cf085d444f3cd07d2ed2eb6fedd Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Tue, 27 Dec 2022 19:25:55 -0800 Subject: [PATCH 072/654] Update V8 DEPS (trusted) Rolling v8/build: https://chromium.googlesource.com/chromium/src/build/+log/4e33131..c171e77 Rolling v8/third_party/catapult: https://chromium.googlesource.com/catapult/+log/2786327..816a551 Rolling v8/third_party/depot_tools: https://chromium.googlesource.com/chromium/tools/depot_tools/+log/0b96058..03af44a Rolling v8/third_party/fuchsia-sdk/sdk: version:11.20221226.2.1..version:11.20221227.3.1 Change-Id: I3f81156fa98c1f989319c43760060fcda87e390e Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4126756 Bot-Commit: v8-ci-autoroll-builder Commit-Queue: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#85023} --- DEPS | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/DEPS b/DEPS index 3054ca573b..e2f252f740 100644 --- a/DEPS +++ b/DEPS @@ -65,7 +65,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. 
- 'fuchsia_version': 'version:11.20221226.2.1', + 'fuchsia_version': 'version:11.20221227.3.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -105,7 +105,7 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '4e331315b661d1a4f130e23178f82e9898ee62b4', + Var('chromium_url') + '/chromium/src/build.git' + '@' + 'c171e777e7164c083caf783d6c104785e9874562', 'buildtools': Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '134af4c91bb9ab46fe1165ff1cf0f76900fa5a7e', 'buildtools/clang_format/script': @@ -209,7 +209,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + '27863274a2f25b92973c10f29b3c4e476fd01a34', + 'url': Var('chromium_url') + '/catapult.git' + '@' + '816a551043358dcb0a5979d2c3af739948a9217d', 'condition': 'checkout_android', }, 'third_party/colorama/src': { @@ -217,7 +217,7 @@ deps = { 'condition': 'checkout_android', }, 'third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '0b96058844728db8040a7348cc4c61fde453401a', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '03af44a5163e9448e375a6bbe7bef1fc0e2bb205', 'third_party/fuchsia-sdk/sdk': { 'packages': [ { From eb008c433e275f48030a1a35c78e3bb4ebcfe323 Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Tue, 27 Dec 2022 19:50:19 -0800 Subject: [PATCH 073/654] Update ICU (trusted) Rolling v8/third_party/icu: https://chromium.googlesource.com/chromium/deps/icu/+log/1b7d391..2c51e5c Update TZ to 2022g (Frank Tang) https://chromium.googlesource.com/chromium/deps/icu/+/2c51e5c [fuchsia] Limit the visibility of ICU targets (Filip Filmar) https://chromium.googlesource.com/chromium/deps/icu/+/9a2f72a [config] Declare the args 
conditionally (Filip Filmar) https://chromium.googlesource.com/chromium/deps/icu/+/b6d6790 Change-Id: Iea9aa1ada43e2a457a8950e533fc1d1c460e1003 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4126757 Bot-Commit: v8-ci-autoroll-builder Commit-Queue: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#85024} --- DEPS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DEPS b/DEPS index e2f252f740..bbf95a2e7f 100644 --- a/DEPS +++ b/DEPS @@ -234,7 +234,7 @@ deps = { 'third_party/googletest/src': Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'af29db7ec28d6df1c7f0f745186884091e602e07', 'third_party/icu': - Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '1b7d391f0528fb3a4976b7541b387ee04f915f83', + Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '2c51e5cc7e0a06cd4cd7cb2ddbac445af9b475ba', 'third_party/instrumented_libraries': Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '09ba70cfb2c0d01c60684660e357ae200caf2968', 'third_party/ittapi': { From 4914da9d013de83c222b518253cc284773beceb5 Mon Sep 17 00:00:00 2001 From: Jakob Linke Date: Wed, 28 Dec 2022 10:06:04 +0100 Subject: [PATCH 074/654] Update comments and names related to stack checks .. and several other minor changes (branch hints, moving code around for better grouping, const). 
Bug: v8:7700 Change-Id: Ia07aa478a5ae5d1852e4ad2dce39f42743376e65 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128096 Commit-Queue: Darius Mercadier Reviewed-by: Darius Mercadier Auto-Submit: Jakob Linke Commit-Queue: Jakob Linke Cr-Commit-Position: refs/heads/main@{#85025} --- src/execution/isolate.cc | 11 ++++++----- src/execution/isolate.h | 29 ++++++++++++++--------------- src/runtime/runtime-internal.cc | 1 - 3 files changed, 20 insertions(+), 21 deletions(-) diff --git a/src/execution/isolate.cc b/src/execution/isolate.cc index f3a5ccca4b..87abe9cc1f 100644 --- a/src/execution/isolate.cc +++ b/src/execution/isolate.cc @@ -5834,18 +5834,19 @@ void Isolate::clear_cached_icu_objects() { #endif // V8_INTL_SUPPORT -bool StackLimitCheck::HandleInterrupt(Isolate* isolate) { +bool StackLimitCheck::HandleStackOverflowAndTerminationRequest() { DCHECK(InterruptRequested()); - if (HasOverflowed()) { - isolate->StackOverflow(); + if (V8_UNLIKELY(HasOverflowed())) { + isolate_->StackOverflow(); return true; } - if (isolate->stack_guard()->HasTerminationRequest()) { - isolate->TerminateExecution(); + if (V8_UNLIKELY(isolate_->stack_guard()->HasTerminationRequest())) { + isolate_->TerminateExecution(); return true; } return false; } + bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const { StackGuard* stack_guard = isolate_->stack_guard(); #ifdef USE_SIMULATOR diff --git a/src/execution/isolate.h b/src/execution/isolate.h index a32f999fe5..ef389a4b11 100644 --- a/src/execution/isolate.h +++ b/src/execution/isolate.h @@ -2635,34 +2635,33 @@ class StackLimitCheck { } static bool HasOverflowed(LocalIsolate* local_isolate); + // Use this to check for stack-overflow when entering runtime from JS code. + bool JsHasOverflowed(uintptr_t gap = 0) const; + // Use this to check for interrupt request in C++ code. 
V8_INLINE bool InterruptRequested() { StackGuard* stack_guard = isolate_->stack_guard(); return GetCurrentStackPosition() < stack_guard->climit(); } - // Handle interripts if InterruptRequested was true. + // Precondition: InterruptRequested == true. // Returns true if any interrupt (overflow or termination) was handled, in - // which case the caller should prevent further JS execution. - V8_EXPORT_PRIVATE bool HandleInterrupt(Isolate* isolate); - - // Use this to check for stack-overflow when entering runtime from JS code. - bool JsHasOverflowed(uintptr_t gap = 0) const; + // which case the caller must prevent further JS execution. + V8_EXPORT_PRIVATE bool HandleStackOverflowAndTerminationRequest(); private: - Isolate* isolate_; + Isolate* const isolate_; }; // This macro may be used in context that disallows JS execution. // That is why it checks only for a stack overflow and termination. -#define STACK_CHECK(isolate, result_value) \ - do { \ - StackLimitCheck stack_check(isolate); \ - if (stack_check.InterruptRequested()) { \ - if (stack_check.HandleInterrupt(isolate)) { \ - return result_value; \ - } \ - } \ +#define STACK_CHECK(isolate, result_value) \ + do { \ + StackLimitCheck stack_check(isolate); \ + if (V8_UNLIKELY(stack_check.InterruptRequested()) && \ + V8_UNLIKELY(stack_check.HandleStackOverflowAndTerminationRequest())) { \ + return result_value; \ + } \ } while (false) class StackTraceFailureMessage { diff --git a/src/runtime/runtime-internal.cc b/src/runtime/runtime-internal.cc index aafb9fe18f..3ef9b2d56e 100644 --- a/src/runtime/runtime-internal.cc +++ b/src/runtime/runtime-internal.cc @@ -367,7 +367,6 @@ Object BytecodeBudgetInterruptWithStackCheck(Isolate* isolate, // We ideally wouldn't actually get StackOverflows here, since we stack // check on bytecode entry, but it's possible that this check fires due to // the runtime function call being what overflows the stack. 
- // if our function entry return isolate->StackOverflow(); } else if (check.InterruptRequested()) { Object return_value = isolate->stack_guard()->HandleInterrupts(); From dff5fc1b23da4153ac37b6abef5f7886893bc96d Mon Sep 17 00:00:00 2001 From: Darius M Date: Wed, 28 Dec 2022 09:58:19 +0100 Subject: [PATCH 075/654] [maglev] Fix wrong EmitDeoptIf instead of EmitEagerDeoptIf A recent CL refactored some Deopts (https://chromium-review.googlesource.com/c/v8/v8/+/4120575), and mistakenly wrote EmitDeoptIf (which isn't an existing function) instead of EmitEagerDeoptIf, which broke the Arm64 build (which isn't covered by the bots yet). Bug: v8:7700 Change-Id: I1301a26584bd76d1a2608c37c07c58fe990446d3 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128095 Reviewed-by: Jakob Linke Commit-Queue: Darius Mercadier Cr-Commit-Position: refs/heads/main@{#85026} --- src/maglev/arm64/maglev-ir-arm64.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index ac0797dd33..2eab7d5e46 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -520,7 +520,7 @@ void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm, } else if (last_map) { // If it is the last map and it is not a migration target, we should deopt // if the check fails. 
- __ EmitDeoptIf(ne, DeoptimizeReason::kWrongMap, this); + __ EmitEagerDeoptIf(ne, DeoptimizeReason::kWrongMap, this); } if (!last_map) { From 28cb67cdec1777bebf44e4b921d81a3ff31cd78c Mon Sep 17 00:00:00 2001 From: Vladimir Nechaev Date: Wed, 28 Dec 2022 10:03:56 +0000 Subject: [PATCH 076/654] Runtime.callFunctionOn supports uniqueContextId Bug: v8:13620 Change-Id: I802deb3325a5c8ac9e7e378d60be591af66e6fee Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4126215 Reviewed-by: Benedikt Meurer Commit-Queue: Vladimir Nechaev Cr-Commit-Position: refs/heads/main@{#85027} --- include/js_protocol.pdl | 7 +++ src/inspector/v8-runtime-agent-impl.cc | 24 ++++++---- src/inspector/v8-runtime-agent-impl.h | 2 +- .../call-function-on-async-expected.txt | 43 +++++++++++++++++- .../runtime/call-function-on-async.js | 45 +++++++++++++++++-- 5 files changed, 106 insertions(+), 15 deletions(-) diff --git a/include/js_protocol.pdl b/include/js_protocol.pdl index 6efcf78785..7960a56f54 100644 --- a/include/js_protocol.pdl +++ b/include/js_protocol.pdl @@ -1402,6 +1402,13 @@ domain Runtime optional string objectGroup # Whether to throw an exception if side effect cannot be ruled out during evaluation. experimental optional boolean throwOnSideEffect + # An alternative way to specify the execution context to call function on. + # Compared to contextId that may be reused across processes, this is guaranteed to be + # system-unique, so it can be used to prevent accidental function call + # in context different than intended (e.g. as a result of navigation across process + # boundaries). + # This is mutually exclusive with `executionContextId`. + experimental optional string uniqueContextId # Whether the result should contain `webDriverValue`, serialized according to # https://w3c.github.io/webdriver-bidi. This is mutually exclusive with `returnByValue`, but # resulting `objectId` is still provided. 
diff --git a/src/inspector/v8-runtime-agent-impl.cc b/src/inspector/v8-runtime-agent-impl.cc index 9d7f4b0b70..5cb582c0bf 100644 --- a/src/inspector/v8-runtime-agent-impl.cc +++ b/src/inspector/v8-runtime-agent-impl.cc @@ -375,16 +375,22 @@ void V8RuntimeAgentImpl::callFunctionOn( Maybe silent, Maybe returnByValue, Maybe generatePreview, Maybe userGesture, Maybe awaitPromise, Maybe executionContextId, Maybe objectGroup, - Maybe throwOnSideEffect, Maybe generateWebDriverValue, + Maybe throwOnSideEffect, Maybe uniqueContextId, + Maybe generateWebDriverValue, std::unique_ptr callback) { - if (objectId.isJust() && executionContextId.isJust()) { - callback->sendFailure(Response::ServerError( - "ObjectId must not be specified together with executionContextId")); + int justCount = (objectId.isJust() ? 1 : 0) + + (executionContextId.isJust() ? 1 : 0) + + (uniqueContextId.isJust() ? 1 : 0); + if (justCount > 1) { + callback->sendFailure(Response::InvalidParams( + "ObjectId, executionContextId and uniqueContextId must mutually " + "exclude each other")); return; } - if (!objectId.isJust() && !executionContextId.isJust()) { - callback->sendFailure(Response::ServerError( - "Either ObjectId or executionContextId must be specified")); + if (justCount < 1) { + callback->sendFailure( + Response::InvalidParams("Either objectId or executionContextId or " + "uniqueContextId must be specified")); return; } WrapMode wrap_mode = generatePreview.fromMaybe(false) ? 
WrapMode::kWithPreview @@ -409,8 +415,8 @@ void V8RuntimeAgentImpl::callFunctionOn( } else { int contextId = 0; Response response = ensureContext(m_inspector, m_session->contextGroupId(), - std::move(executionContextId.fromJust()), - /* uniqueContextId */ {}, &contextId); + std::move(executionContextId), + std::move(uniqueContextId), &contextId); if (!response.IsSuccess()) { callback->sendFailure(response); return; diff --git a/src/inspector/v8-runtime-agent-impl.h b/src/inspector/v8-runtime-agent-impl.h index 9e2ad27e56..4bb0e87114 100644 --- a/src/inspector/v8-runtime-agent-impl.h +++ b/src/inspector/v8-runtime-agent-impl.h @@ -89,7 +89,7 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend { Maybe generatePreview, Maybe userGesture, Maybe awaitPromise, Maybe executionContextId, Maybe objectGroup, Maybe throwOnSideEffect, - Maybe generateWebDriverValue, + Maybe uniqueContextId, Maybe generateWebDriverValue, std::unique_ptr) override; Response releaseObject(const String16& objectId) override; Response getProperties( diff --git a/test/inspector/runtime/call-function-on-async-expected.txt b/test/inspector/runtime/call-function-on-async-expected.txt index f98fc43bf9..e05a203c7d 100644 --- a/test/inspector/runtime/call-function-on-async-expected.txt +++ b/test/inspector/runtime/call-function-on-async-expected.txt @@ -208,11 +208,50 @@ Running test: testEvaluateOnExecutionContext } } +Running test: testEvaluateOnUniqueExecutionContext +{ + id : + result : { + result : { + description : 70 + type : number + value : 70 + } + } +} + Running test: testPassingBothObjectIdAndExecutionContextId { error : { - code : -32000 - message : ObjectId must not be specified together with executionContextId + code : -32602 + message : ObjectId, executionContextId and uniqueContextId must mutually exclude each other + } + id : +} + +Running test: testPassingBothObjectIdAndExecutionContextUniqueId +{ + error : { + code : -32602 + message : ObjectId, executionContextId and 
uniqueContextId must mutually exclude each other + } + id : +} + +Running test: testPassingTwoExecutionContextIds +{ + error : { + code : -32602 + message : ObjectId, executionContextId and uniqueContextId must mutually exclude each other + } + id : +} + +Running test: testPassingNeitherContextIdNorObjectId +{ + error : { + code : -32602 + message : Either objectId or executionContextId or uniqueContextId must be specified } id : } diff --git a/test/inspector/runtime/call-function-on-async.js b/test/inspector/runtime/call-function-on-async.js index 70f823c52c..18c60a288c 100644 --- a/test/inspector/runtime/call-function-on-async.js +++ b/test/inspector/runtime/call-function-on-async.js @@ -8,10 +8,12 @@ let callFunctionOn = Protocol.Runtime.callFunctionOn.bind(Protocol.Runtime); let remoteObject1; let remoteObject2; let executionContextId; +let executionContextUniqueId; Protocol.Runtime.enable(); Protocol.Runtime.onExecutionContextCreated(messageObject => { executionContextId = messageObject.params.context.id; + executionContextUniqueId = messageObject.params.context.uniqueId; InspectorTest.runAsyncTestSuite(testSuite); }); @@ -135,15 +137,52 @@ let testSuite = [ })); }, + async function testEvaluateOnUniqueExecutionContext() { + InspectorTest.logMessage(await callFunctionOn({ + uniqueContextId: executionContextUniqueId, + functionDeclaration: '(function(arg) { return this.globalObjectProperty + arg; })', + arguments: prepareArguments([ 28 ]), + returnByValue: true, + generatePreview: false, + awaitPromise: false + })); + }, + async function testPassingBothObjectIdAndExecutionContextId() { InspectorTest.logMessage(await callFunctionOn({ executionContextId, objectId: remoteObject1.objectId, functionDeclaration: '(function() { return 42; })', arguments: prepareArguments([]), - returnByValue: true, - generatePreview: false, - awaitPromise: false + returnByValue: true + })); + }, + + async function testPassingBothObjectIdAndExecutionContextUniqueId() { + 
InspectorTest.logMessage(await callFunctionOn({ + uniqueContextId: executionContextUniqueId, + objectId: remoteObject1.objectId, + functionDeclaration: '(function() { return 42; })', + arguments: prepareArguments([]), + returnByValue: true + })); + }, + + async function testPassingTwoExecutionContextIds() { + InspectorTest.logMessage(await callFunctionOn({ + executionContextId, + uniqueContextId: executionContextUniqueId, + functionDeclaration: '(function() { return 42; })', + arguments: prepareArguments([]), + returnByValue: true + })); + }, + + async function testPassingNeitherContextIdNorObjectId() { + InspectorTest.logMessage(await callFunctionOn({ + functionDeclaration: '(function() { return 42; })', + arguments: prepareArguments([]), + returnByValue: true })); }, From b8b136cb3662e23b0fb6e2c82c29de08576eee81 Mon Sep 17 00:00:00 2001 From: Jakob Linke Date: Wed, 28 Dec 2022 14:06:17 +0100 Subject: [PATCH 077/654] [maglev] Skip the TieringManager on TieringState::kInProgress When the interrupt_budget is exhausted, Maglev code calls into runtime for tiering logic in TieringManager. As always, the runtime call itself has a certain (noticeable) overhead. This CL is an optimization based on the observation that the TieringManager only performs simple bookkeeping if the tiering_state (or osr_tiering_state) is kInProgress. We can avoid the runtime call overhead in this case. Changes are: 1. Extract the interrupt check (= stack check) into generated code s.t. it's separate from tiering logic. Note, combining the interrupt check and tiering logic was a previous optimization to reduce generated code size, introduced in crrev.com/c/3049076. 2. Skip the runtime call to Runtime::kBytecodeBudgetInterrupt if `tiering_state == kInProgress || osr_tiering_state == kInProgress`. 
Cq-Include-Trybots: luci.v8.try:v8_linux64_fyi_rel Bug: v8:7700 Change-Id: Ibcd416aaea7abdd087741551fa213fa033fe12e8 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4127157 Auto-Submit: Jakob Linke Commit-Queue: Darius Mercadier Reviewed-by: Darius Mercadier Cr-Commit-Position: refs/heads/main@{#85028} --- src/common/globals.h | 6 ++ src/execution/tiering-manager.cc | 6 +- src/maglev/x64/maglev-ir-x64.cc | 113 ++++++++++++++++++++++++++----- src/runtime/runtime-internal.cc | 4 ++ src/runtime/runtime.h | 1 + 5 files changed, 111 insertions(+), 19 deletions(-) diff --git a/src/common/globals.h b/src/common/globals.h index 517c6e259f..2b3713ea47 100644 --- a/src/common/globals.h +++ b/src/common/globals.h @@ -1899,6 +1899,12 @@ enum class TieringState : int32_t { kLastTieringState = kRequestTurbofan_Concurrent, }; +// The state kInProgress (= an optimization request for this function is +// currently being serviced) currently means that no other tiering action can +// happen. Define this constant so we can static_assert it at related code +// sites. +static constexpr bool kTieringStateInProgressBlocksTierup = true; + // To efficiently check whether a marker is kNone or kInProgress using a single // mask, we expect the kNone to be 0 and kInProgress to be 1 so that we can // mask off the lsb for checking. diff --git a/src/execution/tiering-manager.cc b/src/execution/tiering-manager.cc index cba7722a29..dfe74d2e83 100644 --- a/src/execution/tiering-manager.cc +++ b/src/execution/tiering-manager.cc @@ -277,10 +277,12 @@ void TieringManager::MaybeOptimizeFrame(JSFunction function, const TieringState tiering_state = function.feedback_vector().tiering_state(); const TieringState osr_tiering_state = function.feedback_vector().osr_tiering_state(); + // Attenzione! Update this constant in case the condition below changes. 
+ static_assert(kTieringStateInProgressBlocksTierup); if (V8_UNLIKELY(IsInProgress(tiering_state)) || V8_UNLIKELY(IsInProgress(osr_tiering_state))) { - // Note: This effectively disables OSR for the function while it is being - // compiled. + // Note: This effectively disables further tiering actions (e.g. OSR, or + // tiering up into Maglev) for the function while it is being compiled. TraceInOptimizationQueue(function, calling_code_kind); return; } diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index 6781885f47..eddfefb1fe 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -2533,6 +2533,101 @@ void IncreaseInterruptBudget::GenerateCode(MaglevAssembler* masm, Immediate(amount())); } +namespace { + +void HandleInterruptsAndTiering(MaglevAssembler* masm, ZoneLabelRef done, + ReduceInterruptBudget* node, + Register scratch0) { + // First, check for interrupts. + { + Label next; + + // Here, we only care about interrupts since we've already guarded against + // real stack overflows on function entry. + __ cmpq(rsp, __ StackLimitAsOperand(StackLimitKind::kInterruptStackLimit)); + __ j(above, &next); + + // An interrupt has been requested and we must call into runtime to handle + // it; since we already pay the call cost, combine with the TieringManager + // call. + { + SaveRegisterStateForCall save_register_state(masm, + node->register_snapshot()); + __ Move(kContextRegister, masm->native_context().object()); + __ Push(MemOperand(rbp, StandardFrameConstants::kFunctionOffset)); + __ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck_Maglev, 1); + save_register_state.DefineSafepointWithLazyDeopt(node->lazy_deopt_info()); + } + __ jmp(*done); // All done, continue. + + __ bind(&next); + } + + // No pending interrupts. Call into the TieringManager if needed. + { + // Skip the runtime call if the tiering state is kInProgress. 
The runtime + // only performs simple bookkeeping in this case, which we can easily + // replicate here in generated code. + // TODO(jgruber): Use the correct feedback vector once Maglev inlining is + // enabled. + Label update_profiler_ticks_and_interrupt_budget; + { + static_assert(kTieringStateInProgressBlocksTierup); + const Register scratch1 = kScratchRegister; + __ Move(scratch0, masm->compilation_info() + ->toplevel_compilation_unit() + ->feedback() + .object()); + + // If tiering_state is kInProgress, skip the runtime call. + __ movzxwl(scratch1, + FieldOperand(scratch0, FeedbackVector::kFlagsOffset)); + __ DecodeField(scratch1); + __ cmpl(scratch1, Immediate(static_cast(TieringState::kInProgress))); + __ j(equal, &update_profiler_ticks_and_interrupt_budget); + + // If osr_tiering_state is kInProgress, skip the runtime call. + __ movzxwl(scratch1, + FieldOperand(scratch0, FeedbackVector::kFlagsOffset)); + __ DecodeField(scratch1); + __ cmpl(scratch1, Immediate(static_cast(TieringState::kInProgress))); + __ j(equal, &update_profiler_ticks_and_interrupt_budget); + } + + { + SaveRegisterStateForCall save_register_state(masm, + node->register_snapshot()); + __ Move(kContextRegister, masm->native_context().object()); + __ Push(MemOperand(rbp, StandardFrameConstants::kFunctionOffset)); + // Note: must not cause a lazy deopt! + __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Maglev, 1); + save_register_state.DefineSafepoint(); + } + __ jmp(*done); + + __ bind(&update_profiler_ticks_and_interrupt_budget); + // We are skipping the call to Runtime::kBytecodeBudgetInterrupt_Maglev + // since the tiering state is kInProgress. Perform bookkeeping that would + // have been done in the runtime function: + __ AssertFeedbackVector(scratch0); + // FeedbackVector::SaturatingIncrementProfilerTicks. + // TODO(jgruber): This isn't saturating and thus we may theoretically + // exceed Smi::kMaxValue. 
But, 1) this is very unlikely since it'd take + // quite some time to exhaust the budget that many times; and 2) even an + // overflow doesn't hurt us at all. + __ incl(FieldOperand(scratch0, FeedbackVector::kProfilerTicksOffset)); + // JSFunction::SetInterruptBudget. + __ movq(scratch0, MemOperand(rbp, StandardFrameConstants::kFunctionOffset)); + __ LoadTaggedPointerField( + scratch0, FieldOperand(scratch0, JSFunction::kFeedbackCellOffset)); + __ movl(FieldOperand(scratch0, FeedbackCell::kInterruptBudgetOffset), + Immediate(v8_flags.interrupt_budget)); + __ jmp(*done); + } +} + +} // namespace + int ReduceInterruptBudget::MaxCallStackArgs() const { return 1; } void ReduceInterruptBudget::SetValueLocationConstraints() { set_temporaries_needed(1); @@ -2546,23 +2641,7 @@ void ReduceInterruptBudget::GenerateCode(MaglevAssembler* masm, __ subl(FieldOperand(scratch, FeedbackCell::kInterruptBudgetOffset), Immediate(amount())); ZoneLabelRef done(masm); - __ JumpToDeferredIf( - less, - [](MaglevAssembler* masm, ZoneLabelRef done, - ReduceInterruptBudget* node) { - { - SaveRegisterStateForCall save_register_state( - masm, node->register_snapshot()); - __ Move(kContextRegister, masm->native_context().object()); - __ Push(MemOperand(rbp, StandardFrameConstants::kFunctionOffset)); - __ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck_Maglev, - 1); - save_register_state.DefineSafepointWithLazyDeopt( - node->lazy_deopt_info()); - } - __ jmp(*done); - }, - done, this); + __ JumpToDeferredIf(less, HandleInterruptsAndTiering, done, this, scratch); __ bind(*done); } diff --git a/src/runtime/runtime-internal.cc b/src/runtime/runtime-internal.cc index 3ef9b2d56e..3019895b49 100644 --- a/src/runtime/runtime-internal.cc +++ b/src/runtime/runtime-internal.cc @@ -410,6 +410,10 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterrupt_Sparkplug) { return BytecodeBudgetInterrupt(isolate, args, CodeKind::BASELINE); } +RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterrupt_Maglev) { + return 
BytecodeBudgetInterrupt(isolate, args, CodeKind::MAGLEV); +} + RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptWithStackCheck_Maglev) { return BytecodeBudgetInterruptWithStackCheck(isolate, args, CodeKind::MAGLEV); } diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h index ecf50b7c66..860c1c4f8a 100644 --- a/src/runtime/runtime.h +++ b/src/runtime/runtime.h @@ -231,6 +231,7 @@ namespace internal { F(BytecodeBudgetInterruptWithStackCheck_Ignition, 1, 1) \ F(BytecodeBudgetInterrupt_Sparkplug, 1, 1) \ F(BytecodeBudgetInterruptWithStackCheck_Sparkplug, 1, 1) \ + F(BytecodeBudgetInterrupt_Maglev, 1, 1) \ F(BytecodeBudgetInterruptWithStackCheck_Maglev, 1, 1) \ F(NewError, 2, 1) \ F(NewForeign, 0, 1) \ From 0f3036ce091199cb685bc9e89d2e100b74e687e8 Mon Sep 17 00:00:00 2001 From: Darius M Date: Wed, 28 Dec 2022 14:19:01 +0100 Subject: [PATCH 078/654] [maglev][arm64] Fix scratch register shortage on funs with many args The Sub macro sometimes needs a scratch register when the 2nd operand is too large to be encoded in the instruction. The prologue was already reserving 2 scratch registers, which made Sub crash on a DCHECK when trying to reserve one more scratch register. 
Bug: v8:7700 Change-Id: I995689b8b16e3ef216641f0b6cadbf58f7f3740b Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128554 Commit-Queue: Darius Mercadier Reviewed-by: Jakob Linke Cr-Commit-Position: refs/heads/main@{#85029} --- src/maglev/arm64/maglev-assembler-arm64.cc | 2 +- test/mjsunit/maglev/lots-of-args.js | 46 ++++++++++++++++++++++ 2 files changed, 47 insertions(+), 1 deletion(-) create mode 100644 test/mjsunit/maglev/lots-of-args.js diff --git a/src/maglev/arm64/maglev-assembler-arm64.cc b/src/maglev/arm64/maglev-assembler-arm64.cc index 6d65404948..f30fbb5640 100644 --- a/src/maglev/arm64/maglev-assembler-arm64.cc +++ b/src/maglev/arm64/maglev-assembler-arm64.cc @@ -235,7 +235,6 @@ void MaglevAssembler::Prologue(Graph* graph) { // after building the frame we can quickly precheck both at once. UseScratchRegisterScope temps(this); Register stack_slots_size = temps.AcquireX(); - Register interrupt_stack_limit = temps.AcquireX(); Mov(stack_slots_size, fp); // Round up the stack slots and max call args separately, since both will be // padded by their respective uses. @@ -245,6 +244,7 @@ void MaglevAssembler::Prologue(Graph* graph) { std::max(static_cast(graph->max_deopted_stack_size()), max_stack_slots_used * kSystemPointerSize); Sub(stack_slots_size, stack_slots_size, Immediate(max_stack_size)); + Register interrupt_stack_limit = temps.AcquireX(); LoadStackLimit(interrupt_stack_limit, StackLimitKind::kInterruptStackLimit); Cmp(stack_slots_size, interrupt_stack_limit); diff --git a/test/mjsunit/maglev/lots-of-args.js b/test/mjsunit/maglev/lots-of-args.js new file mode 100644 index 0000000000..9c75cb256d --- /dev/null +++ b/test/mjsunit/maglev/lots-of-args.js @@ -0,0 +1,46 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +// Flags: --allow-natives-syntax --maglev + +function foo( + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x +) { } + +%PrepareFunctionForOptimization(foo); +foo(); +foo(); +%OptimizeMaglevOnNextCall(foo); +foo(); From be9c39d3d95330b58d4a7f53984c1a79bf370f0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olivier=20Fl=C3=BCckiger?= Date: Wed, 28 Dec 2022 13:33:27 +0000 Subject: [PATCH 079/654] [static-roots] Rename --static-roots to 
--generate-static-roots MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rename the flag in order to avoid confusion with `V8_STATIC_ROOTS_BOOL`. The flag is used by `./mksnapshot` to generate a new static-roots.h file. Bug: v8:13466 Change-Id: Ieb5af89b9839673fd2b8aeef197c104aa3c580aa Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4111933 Reviewed-by: Jakob Linke Auto-Submit: Olivier Flückiger Commit-Queue: Jakob Linke Cr-Commit-Position: refs/heads/main@{#85030} --- src/execution/isolate.cc | 7 +++---- src/flags/flag-definitions.h | 5 +++-- src/snapshot/mksnapshot.cc | 6 +++--- src/snapshot/static-roots-gen.cc | 4 ++-- tools/dev/gen-static-roots.py | 2 +- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/execution/isolate.cc b/src/execution/isolate.cc index 87abe9cc1f..bc0e4bfb45 100644 --- a/src/execution/isolate.cc +++ b/src/execution/isolate.cc @@ -4148,15 +4148,14 @@ VirtualMemoryCage* Isolate::GetPtrComprCodeCageForTesting() { return V8_EXTERNAL_CODE_SPACE_BOOL ? heap_.code_range() : GetPtrComprCage(); } -// If this check fails mksnapshot needs to be built without static roots and -// then called with --static-roots to re-regenerate the static-roots.h file. void Isolate::VerifyStaticRoots() { #if V8_STATIC_ROOTS_BOOL static_assert(ReadOnlyHeap::IsReadOnlySpaceShared(), "Static read only roots are only supported when there is one " "shared read only space per cage"); -#define STATIC_ROOTS_FAILED_MSG \ - "Run `tools/dev/gen-static-roots.py` to update static-roots.h." +#define STATIC_ROOTS_FAILED_MSG \ + "Read-only heap layout changed. Run `tools/dev/gen-static-roots.py` to " \ + "update static-roots.h." 
static_assert(static_cast(RootIndex::kReadOnlyRootsCount) == StaticReadOnlyRootsPointerTable.size(), STATIC_ROOTS_FAILED_MSG); diff --git a/src/flags/flag-definitions.h b/src/flags/flag-definitions.h index 4e3bc2be69..e7ddb68c60 100644 --- a/src/flags/flag-definitions.h +++ b/src/flags/flag-definitions.h @@ -2020,8 +2020,9 @@ DEFINE_STRING(embedded_src, nullptr, DEFINE_STRING( embedded_variant, nullptr, "Label to disambiguate symbols in embedded data file. (mksnapshot only)") -DEFINE_STRING(static_roots, nullptr, - "Path for writing static-roots.h. (mksnapshot only)") +DEFINE_STRING(static_roots_src, nullptr, + "Path for writing a fresh static-roots.h. (mksnapshot only, " + "build without static roots only)") DEFINE_STRING(startup_src, nullptr, "Write V8 startup as C++ src. (mksnapshot only)") DEFINE_STRING(startup_blob, nullptr, diff --git a/src/snapshot/mksnapshot.cc b/src/snapshot/mksnapshot.cc index ef606e87e1..bc731c0d78 100644 --- a/src/snapshot/mksnapshot.cc +++ b/src/snapshot/mksnapshot.cc @@ -231,7 +231,7 @@ int main(int argc, char** argv) { std::string usage = "Usage: " + std::string(argv[0]) + " [--startup-src=file]" + " [--startup-blob=file]" + " [--embedded-src=file]" + " [--embedded-variant=label]" + - " [--static-roots=file]" + " [--target-arch=arch]" + + " [--static-roots-src=file]" + " [--target-arch=arch]" + " [--target-os=os] [extras]\n\n"; int result = i::FlagList::SetFlagsFromCommandLine( &argc, argv, true, HelpOptions(HelpOptions::kExit, usage.c_str())); @@ -291,8 +291,8 @@ int main(int argc, char** argv) { // That's fine as far as the embedded file writer is concerned. 
WriteEmbeddedFile(&embedded_writer); - if (i::v8_flags.static_roots) { - i::StaticRootsTableGen::write(i_isolate, i::v8_flags.static_roots); + if (i::v8_flags.static_roots_src) { + i::StaticRootsTableGen::write(i_isolate, i::v8_flags.static_roots_src); } } diff --git a/src/snapshot/static-roots-gen.cc b/src/snapshot/static-roots-gen.cc index 772b340b53..79036ea89c 100644 --- a/src/snapshot/static-roots-gen.cc +++ b/src/snapshot/static-roots-gen.cc @@ -16,8 +16,8 @@ namespace internal { void StaticRootsTableGen::write(Isolate* isolate, const char* file) { CHECK_WITH_MSG(!V8_STATIC_ROOTS_BOOL, - "--static-roots is only supported in builds with " - "v8_enable_static_roots disabled"); + "Re-generating the table of roots is only supported in builds " + "with v8_enable_static_roots disabled"); CHECK(file); static_assert(static_cast(RootIndex::kFirstReadOnlyRoot) == 0); diff --git a/tools/dev/gen-static-roots.py b/tools/dev/gen-static-roots.py index 720d9bf349..eb14926c4e 100755 --- a/tools/dev/gen-static-roots.py +++ b/tools/dev/gen-static-roots.py @@ -116,7 +116,7 @@ for target in [args.configuration]: gn_args = config["gn_args"] build_path = build(build_dir, gn_args) out_file = Path(tempfile.gettempdir()) / f"static-roots-{target}.h" - run([build_path / "mksnapshot", "--static-roots", out_file]) + run([build_path / "mksnapshot", "--static-roots-src", out_file]) target_file = v8_path / config["target"] if not filecmp.cmp(out_file, target_file): shutil.move(out_file, target_file) From 48495722ce32796b5fe2849fea7bac97fff574c8 Mon Sep 17 00:00:00 2001 From: Michael Achenbach Date: Wed, 28 Dec 2022 10:42:21 +0100 Subject: [PATCH 080/654] [js-fuzzer] Add optimization template for Maglev This makes js-fuzzer use %OptimizeMaglevOnNextCall in 30% of the times when optimization patterns are chosen. Other probabilities of the function-call mutator are tuned a bit to keep using %OptimizeFunctionOnNextCall proportionally similarly to before. (~ some round number preferences... 
exact choices might not matter much, since the original probability choices are rather arbitrary anyway) Bug: v8:7700 Change-Id: I7727ea27fd956efab6fbee2b4a090213d1d7ff05 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4124118 Reviewed-by: Almothana Athamneh Reviewed-by: Jakob Linke Commit-Queue: Michael Achenbach Cr-Commit-Position: refs/heads/main@{#85031} --- .../mutators/function_call_mutator.js | 11 ++-- .../test/test_mutate_function_calls.js | 17 +++++- .../mutate_function_call_maglev_expected.js | 23 ++++++++ .../mutation_order/output_expected.js | 58 +++++++++++++------ 4 files changed, 83 insertions(+), 26 deletions(-) create mode 100644 tools/clusterfuzz/js_fuzzer/test_data/mutate_function_call_maglev_expected.js diff --git a/tools/clusterfuzz/js_fuzzer/mutators/function_call_mutator.js b/tools/clusterfuzz/js_fuzzer/mutators/function_call_mutator.js index 00272fcd55..665550f92b 100644 --- a/tools/clusterfuzz/js_fuzzer/mutators/function_call_mutator.js +++ b/tools/clusterfuzz/js_fuzzer/mutators/function_call_mutator.js @@ -49,7 +49,7 @@ class FunctionCallMutator extends mutator.Mutator { } const probability = random.random(); - if (probability < 0.4) { + if (probability < 0.3) { const randFunc = common.randomFunction(path); if (randFunc) { thisMutator.annotate( @@ -58,11 +58,12 @@ class FunctionCallMutator extends mutator.Mutator { path.node.callee = randFunc; } - } else if (probability < 0.6 && thisMutator.settings.engine == 'V8') { + } else if (probability < 0.7 && thisMutator.settings.engine == 'V8') { const prepareTemplate = babelTemplate( '__V8BuiltinPrepareFunctionForOptimization(ID)'); + const optimizationMode = random.choose(0.7) ? 
'Function' : 'Maglev'; const optimizeTemplate = babelTemplate( - '__V8BuiltinOptimizeFunctionOnNextCall(ID)'); + `__V8BuiltinOptimize${optimizationMode}OnNextCall(ID)`); const nodes = [ prepareTemplate({ @@ -86,7 +87,7 @@ class FunctionCallMutator extends mutator.Mutator { thisMutator.insertBeforeSkip( path, _liftExpressionsToStatements(path, nodes)); } - } else if (probability < 0.75 && thisMutator.settings.engine == 'V8') { + } else if (probability < 0.8 && thisMutator.settings.engine == 'V8') { const template = babelTemplate( '__V8BuiltinCompileBaseline(ID)'); @@ -108,7 +109,7 @@ class FunctionCallMutator extends mutator.Mutator { thisMutator.insertBeforeSkip( path, _liftExpressionsToStatements(path, nodes)); } - } else if (probability < 0.85 && + } else if (probability < 0.9 && thisMutator.settings.engine == 'V8') { const template = babelTemplate( '__V8BuiltinDeoptimizeFunction(ID)'); diff --git a/tools/clusterfuzz/js_fuzzer/test/test_mutate_function_calls.js b/tools/clusterfuzz/js_fuzzer/test/test_mutate_function_calls.js index 292c1c0c7e..70b6038dd6 100644 --- a/tools/clusterfuzz/js_fuzzer/test/test_mutate_function_calls.js +++ b/tools/clusterfuzz/js_fuzzer/test/test_mutate_function_calls.js @@ -36,14 +36,15 @@ describe('Mutate functions', () => { }); it('is robust without available functions', () => { - sandbox.stub(random, 'random').callsFake(() => { return 0.3; }); + sandbox.stub(random, 'random').callsFake(() => { return 0.2; }); // We just ensure here that mutating this file doesn't throw. 
loadAndMutate('mutate_function_call.js'); }); - it('optimizes functions in V8', () => { + it('optimizes functions with turbofan in V8', () => { sandbox.stub(random, 'random').callsFake(() => { return 0.5; }); + sandbox.stub(random, 'choose').callsFake(p => true); const source = loadAndMutate('mutate_function_call.js'); const mutated = sourceHelpers.generateCode(source); @@ -51,6 +52,18 @@ describe('Mutate functions', () => { 'mutate_function_call_expected.js', mutated); }); + it('optimizes functions with maglev in V8', () => { + sandbox.stub(random, 'random').callsFake(() => { return 0.5; }); + // False-path takes 'Maglev'. Other calls to choose should return + // true. It's also used to determine if a mutator should be chosen. + sandbox.stub(random, 'choose').callsFake(p => p == 0.7 ? false : true); + + const source = loadAndMutate('mutate_function_call.js'); + const mutated = sourceHelpers.generateCode(source); + helpers.assertExpectedResult( + 'mutate_function_call_maglev_expected.js', mutated); + }); + it('compiles functions in V8 to baseline', () => { sandbox.stub(random, 'random').callsFake(() => { return 0.7; }); diff --git a/tools/clusterfuzz/js_fuzzer/test_data/mutate_function_call_maglev_expected.js b/tools/clusterfuzz/js_fuzzer/test_data/mutate_function_call_maglev_expected.js new file mode 100644 index 0000000000..f6b8823635 --- /dev/null +++ b/tools/clusterfuzz/js_fuzzer/test_data/mutate_function_call_maglev_expected.js @@ -0,0 +1,23 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +%PrepareFunctionForOptimization(__f_0); + +__f_0(1); + +__f_0(1); + +%OptimizeMaglevOnNextCall(__f_0); + +// Original: mutate_function_call.js + +/* FunctionCallMutator: Optimizing __f_0 */ +__f_0(1); + +a = ( +/* FunctionCallMutator: Optimizing __f_0 */ +%PrepareFunctionForOptimization(__f_0), __f_0(1), __f_0(1), %OptimizeMaglevOnNextCall(__f_0), __f_0(1)); +foo(1, ( +/* FunctionCallMutator: Optimizing __f_0 */ +%PrepareFunctionForOptimization(__f_0), __f_0(), __f_0(), %OptimizeMaglevOnNextCall(__f_0), __f_0())); diff --git a/tools/clusterfuzz/js_fuzzer/test_data/mutation_order/output_expected.js b/tools/clusterfuzz/js_fuzzer/test_data/mutation_order/output_expected.js index 642a0ae3d8..ddad108080 100644 --- a/tools/clusterfuzz/js_fuzzer/test_data/mutation_order/output_expected.js +++ b/tools/clusterfuzz/js_fuzzer/test_data/mutation_order/output_expected.js @@ -11,8 +11,8 @@ // Original: mutation_order/input.js try { var __v_0 = - /* NumberMutator: Replaced 1 with 17 */ - 17; + /* NumberMutator: Replaced 1 with -10 */ + -10; } catch (e) {} try { @@ -25,10 +25,10 @@ try { try { var __v_3 = { - /* NumberMutator: Replaced 0 with 5 */ - 5: - /* NumberMutator: Replaced 1 with 13 */ - 13 + /* NumberMutator: Replaced 0 with 8 */ + 8: + /* NumberMutator: Replaced 1 with 3 */ + 3 }; } catch (e) {} @@ -42,14 +42,14 @@ try { try { __f_0(__v_0, - /* NumberMutator: Replaced 3 with -77 */ - -77); + /* NumberMutator: Replaced 3 with 5 */ + 5); } catch (e) {} try { __f_0(__v_0, - /* NumberMutator: Replaced 3 with 12 */ - 12); + /* NumberMutator: Replaced 3 with NaN */ + NaN); } catch (e) {} try { @@ -59,8 +59,8 @@ try { try { /* FunctionCallMutator: Optimizing __f_0 */ __f_0(__v_0, - /* NumberMutator: Replaced 3 with -7 */ - -7); + /* NumberMutator: Replaced 3 with 2 */ + 2); } catch (e) {} function __f_1(__v_6) { @@ -71,7 +71,23 @@ function __f_1(__v_6) { } try { - /* FunctionCallMutator: Replaced __f_0 with __f_0 */ + %PrepareFunctionForOptimization(__f_0); +} catch (e) {} + 
+try { + __f_0('foo', __v_1); +} catch (e) {} + +try { + __f_0('foo', __v_1); +} catch (e) {} + +try { + %OptimizeFunctionOnNextCall(__f_0); +} catch (e) {} + +try { + /* FunctionCallMutator: Optimizing __f_0 */ __f_0('foo', __v_1); } catch (e) {} @@ -82,18 +98,22 @@ try { try { __f_1( - /* NumberMutator: Replaced 2 with -13 */ - -13, __f_0(__v_0, __v_1)); + /* NumberMutator: Replaced 2 with -10 */ + -10, __f_0(__v_0, __v_1)); } catch (e) {} try { - /* FunctionCallMutator: Replaced __f_0 with __f_1 */ - __f_1(__v_0, __v_1); + /* FunctionCallMutator: Deoptimizing __f_0 */ + __f_0(__v_0, __v_1); +} catch (e) {} + +try { + %DeoptimizeFunction(__f_0); } catch (e) {} try { /* FunctionCallMutator: Replaced __f_1 with __f_1 */ __f_1(__v_1, - /* NumberMutator: Replaced 3 with 7 */ - 7); + /* NumberMutator: Replaced 3 with 16 */ + 16); } catch (e) {} From d65596fc36f3b8362e1f8e04a6c9ce04c569f7a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olivier=20Fl=C3=BCckiger?= Date: Wed, 28 Dec 2022 13:35:06 +0000 Subject: [PATCH 081/654] [static-roots] Fix performance regression from 4116776 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Addresses two potential performance regressions introduced in https://crrev.com/c/4116776. First, the left-over CHECK is changed to a DCHECK. Second, calling `EarlyGetReadOnlyRoots` should at least be as efficient as `GetReadOnlyRoots` before the earlier change. 
Bug: v8:13466 Change-Id: I93e9c06ce651cae90e9c969e54ec73e4eab80fd6 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4127159 Reviewed-by: Anton Bikineev Commit-Queue: Olivier Flückiger Reviewed-by: Jakob Linke Cr-Commit-Position: refs/heads/main@{#85032} --- src/heap/read-only-heap-inl.h | 23 +++++++++++++++++------ src/heap/read-only-heap.h | 4 ++++ src/objects/heap-object.h | 4 +--- src/objects/objects-inl.h | 2 +- 4 files changed, 23 insertions(+), 10 deletions(-) diff --git a/src/heap/read-only-heap-inl.h b/src/heap/read-only-heap-inl.h index c3528c57b1..5afa1e9982 100644 --- a/src/heap/read-only-heap-inl.h +++ b/src/heap/read-only-heap-inl.h @@ -12,6 +12,19 @@ namespace v8 { namespace internal { +// static +ReadOnlyRoots ReadOnlyHeap::EarlyGetReadOnlyRoots(HeapObject object) { +#ifdef V8_SHARED_RO_HEAP + auto* shared_ro_heap = SoleReadOnlyHeap::shared_ro_heap_; + if (shared_ro_heap && shared_ro_heap->roots_init_complete()) { + return ReadOnlyRoots(shared_ro_heap->read_only_roots_); + } + return ReadOnlyRoots(GetHeapFromWritableObject(object)); +#else + return GetReadOnlyRoots(object); +#endif // V8_SHARED_RO_HEAP +} + // static ReadOnlyRoots ReadOnlyHeap::GetReadOnlyRoots(HeapObject object) { #ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE @@ -20,16 +33,14 @@ ReadOnlyRoots ReadOnlyHeap::GetReadOnlyRoots(HeapObject object) { #else #ifdef V8_SHARED_RO_HEAP auto* shared_ro_heap = SoleReadOnlyHeap::shared_ro_heap_; - // If this check fails in code that runs during initialization make sure to - // load the ReadOnlyRoots from an isolate instead. - // TODO(olivf, v8:13466): Relax this to a DCHECK once we are sure we got it - // right everywhere. - CHECK(shared_ro_heap && shared_ro_heap->roots_init_complete()); + // If this check fails in code that runs during initialization use + // EarlyGetReadOnlyRoots instead. 
+ DCHECK(shared_ro_heap && shared_ro_heap->roots_init_complete()); return ReadOnlyRoots(shared_ro_heap->read_only_roots_); #else return ReadOnlyRoots(GetHeapFromWritableObject(object)); #endif // V8_SHARED_RO_HEAP -#endif // V8_COMPRESS_POINTERS +#endif // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE } } // namespace internal diff --git a/src/heap/read-only-heap.h b/src/heap/read-only-heap.h index 9243937eed..94529044b4 100644 --- a/src/heap/read-only-heap.h +++ b/src/heap/read-only-heap.h @@ -77,6 +77,10 @@ class ReadOnlyHeap { // specific roots table. V8_EXPORT_PRIVATE inline static ReadOnlyRoots GetReadOnlyRoots( HeapObject object); + // Returns the current isolates roots table during initialization as opposed + // to the shared one in case the latter is not initialized yet. + V8_EXPORT_PRIVATE inline static ReadOnlyRoots EarlyGetReadOnlyRoots( + HeapObject object); // Extends the read-only object cache with new zero smi and returns a // reference to it. diff --git a/src/objects/heap-object.h b/src/objects/heap-object.h index 3aabaa4503..752e91b61e 100644 --- a/src/objects/heap-object.h +++ b/src/objects/heap-object.h @@ -86,9 +86,7 @@ class HeapObject : public Object { // This version is intended to be used for the isolate values produced by // i::GetPtrComprCageBase(HeapObject) function which may return nullptr. inline ReadOnlyRoots GetReadOnlyRoots(PtrComprCageBase cage_base) const; - // This is slower, but safe to call during bootstrapping. On shared read only - // heap configurations it returns the current isolates roots table as opposed - // to the shared one. + // This is slower, but safe to call during bootstrapping. 
inline ReadOnlyRoots EarlyGetReadOnlyRoots() const; // Whether the object is in the RO heap and the RO heap is shared, or in the diff --git a/src/objects/objects-inl.h b/src/objects/objects-inl.h index 01605903c9..58c56204ac 100644 --- a/src/objects/objects-inl.h +++ b/src/objects/objects-inl.h @@ -797,7 +797,7 @@ void HeapObject::VerifySmiField(int offset) { #endif ReadOnlyRoots HeapObject::EarlyGetReadOnlyRoots() const { - return ReadOnlyRoots(GetHeapFromWritableObject(*this)); + return ReadOnlyHeap::EarlyGetReadOnlyRoots(*this); } ReadOnlyRoots HeapObject::GetReadOnlyRoots() const { From 4a719abf8e05ccb280e6104adcdc64af949e5d15 Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Wed, 28 Dec 2022 19:09:57 -0800 Subject: [PATCH 082/654] Update V8 DEPS (trusted) Rolling v8/build: https://chromium.googlesource.com/chromium/src/build/+log/c171e77..d9fb377 Rolling v8/third_party/catapult: https://chromium.googlesource.com/catapult/+log/816a551..fa82fd2 Rolling v8/third_party/fuchsia-sdk/sdk: version:11.20221227.3.1..version:11.20221228.1.1 Rolling v8/third_party/jinja2: https://chromium.googlesource.com/chromium/src/third_party/jinja2/+log/4633bf4..264c07d Change-Id: I76e336b991407015c2789fdcdb6e27c96733c1e4 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4126919 Commit-Queue: v8-ci-autoroll-builder Bot-Commit: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#85033} --- DEPS | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/DEPS b/DEPS index bbf95a2e7f..e4b98e0122 100644 --- a/DEPS +++ b/DEPS @@ -65,7 +65,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. 
- 'fuchsia_version': 'version:11.20221227.3.1', + 'fuchsia_version': 'version:11.20221228.1.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -105,7 +105,7 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + 'c171e777e7164c083caf783d6c104785e9874562', + Var('chromium_url') + '/chromium/src/build.git' + '@' + 'd9fb377a525169334fb2aa7970f111d672bab811', 'buildtools': Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '134af4c91bb9ab46fe1165ff1cf0f76900fa5a7e', 'buildtools/clang_format/script': @@ -209,7 +209,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + '816a551043358dcb0a5979d2c3af739948a9217d', + 'url': Var('chromium_url') + '/catapult.git' + '@' + 'fa82fd2fd92b7482e28a8257c3cc79d99e0897ff', 'condition': 'checkout_android', }, 'third_party/colorama/src': { @@ -244,7 +244,7 @@ deps = { 'condition': "checkout_ittapi or check_v8_header_includes", }, 'third_party/jinja2': - Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + '4633bf431193690c3491244f5a0acbe9ac776233', + Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + '264c07d7e64f2874434a3b8039e101ddf1b01e7e', 'third_party/jsoncpp/source': Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '42e892d96e47b1f6e29844cc705e148ec4856448', 'third_party/logdog/logdog': From cfec73d847e8cc327725afaf8025e6021fa383d7 Mon Sep 17 00:00:00 2001 From: QiuJi Date: Thu, 29 Dec 2022 12:43:26 +0800 Subject: [PATCH 083/654] [riscv][regalloc] Port the rest part of "Resolve tail-call gap moves" Port commit 2f4397d652f200501f65b913e8a8a2cc5e4a9404 Bug: chromium:1269989 Change-Id: I5929a605300b9c127e61710585314c3fc50a1aff Reviewed-on: 
https://chromium-review.googlesource.com/c/v8/v8/+/4128600 Reviewed-by: Yahan Lu Commit-Queue: Yahan Lu Commit-Queue: ji qiu Auto-Submit: ji qiu Cr-Commit-Position: refs/heads/main@{#85034} --- src/codegen/riscv/macro-assembler-riscv.h | 11 +- .../backend/riscv/code-generator-riscv.cc | 125 ++++++++---------- 2 files changed, 61 insertions(+), 75 deletions(-) diff --git a/src/codegen/riscv/macro-assembler-riscv.h b/src/codegen/riscv/macro-assembler-riscv.h index 6fc51271fa..cbe9c9dd6f 100644 --- a/src/codegen/riscv/macro-assembler-riscv.h +++ b/src/codegen/riscv/macro-assembler-riscv.h @@ -1560,9 +1560,14 @@ void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count, } struct MoveCycleState { - // Whether a move in the cycle needs the scratch or double scratch register. - bool pending_scratch_register_use = false; - bool pending_double_scratch_register_use = false; + // List of scratch registers reserved for pending moves in a move cycle, and + // which should therefore not be used as a temporary location by + // {MoveToTempLocation}. + RegList scratch_regs; + // Available scratch registers during the move cycle resolution scope. + base::Optional temps; + // Scratch register picked by {MoveToTempLocation}. + base::Optional scratch_reg; }; #define ACCESS_MASM(masm) masm-> diff --git a/src/compiler/backend/riscv/code-generator-riscv.cc b/src/compiler/backend/riscv/code-generator-riscv.cc index 6b227e3e76..65f626b5f7 100644 --- a/src/compiler/backend/riscv/code-generator-riscv.cc +++ b/src/compiler/backend/riscv/code-generator-riscv.cc @@ -4374,94 +4374,75 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source) { // Must be kept in sync with {MoveTempLocationTo}. 
DCHECK(!source->IsImmediate()); auto rep = LocationOperand::cast(source)->representation(); - if ((IsFloatingPoint(rep) && - !move_cycle_.pending_double_scratch_register_use) || - (!IsFloatingPoint(rep) && !move_cycle_.pending_scratch_register_use)) { - // The scratch register for this rep is available. - int scratch_reg_code = - !IsFloatingPoint(rep) ? kScratchReg.code() : kScratchDoubleReg.code(); - AllocatedOperand scratch(LocationOperand::REGISTER, rep, scratch_reg_code); + move_cycle_.temps.emplace(tasm()); + auto& temps = *move_cycle_.temps; + // Temporarily exclude the reserved scratch registers while we pick one to + // resolve the move cycle. Re-include them immediately afterwards as they + // might be needed for the move to the temp location. + temps.Exclude(move_cycle_.scratch_regs); + if (!IsFloatingPoint(rep)) { + if (temps.hasAvailable()) { + Register scratch = move_cycle_.temps->Acquire(); + move_cycle_.scratch_reg.emplace(scratch); + } + } + + temps.Include(move_cycle_.scratch_regs); + + if (move_cycle_.scratch_reg.has_value()) { + // A scratch register is available for this rep. + // auto& scratch_reg = *move_cycle_.scratch_reg; + AllocatedOperand scratch(LocationOperand::REGISTER, rep, + move_cycle_.scratch_reg->code()); AssembleMove(source, &scratch); } else { - // The scratch register is blocked by pending moves. Use the stack instead. - int new_slots = ElementSizeInPointers(rep); - RiscvOperandConverter g(this, nullptr); - if (source->IsRegister()) { - __ Push(g.ToRegister(source)); -#if V8_TARGET_ARCH_RISCV64 - } else if (source->IsStackSlot() || source->IsFloatStackSlot() || - source->IsDoubleStackSlot()) { -#elif V8_TARGET_ARCH_RISCV32 - } else if (source->IsStackSlot() || source->IsFloatStackSlot()) { -#endif - __ LoadWord(kScratchReg, g.ToMemOperand(source)); - __ Push(kScratchReg); - } else { - // Bump the stack pointer and assemble the move. 
- int last_frame_slot_id = - frame_access_state_->frame()->GetTotalFrameSlotCount() - 1; - int sp_delta = frame_access_state_->sp_delta(); - int temp_slot = last_frame_slot_id + sp_delta + new_slots; - __ SubWord(sp, sp, Operand(new_slots * kSystemPointerSize)); - AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot); - AssembleMove(source, &temp); - } - frame_access_state()->IncreaseSPDelta(new_slots); + // The scratch registers are blocked by pending moves. Use the stack + // instead. + Push(source); } } void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest, MachineRepresentation rep) { - if ((IsFloatingPoint(rep) && - !move_cycle_.pending_double_scratch_register_use) || - (!IsFloatingPoint(rep) && !move_cycle_.pending_scratch_register_use)) { - int scratch_reg_code = - !IsFloatingPoint(rep) ? kScratchReg.code() : kScratchDoubleReg.code(); - AllocatedOperand scratch(LocationOperand::REGISTER, rep, scratch_reg_code); + if (move_cycle_.scratch_reg.has_value()) { + // auto& scratch_reg = *move_cycle_.scratch_reg; + AllocatedOperand scratch(LocationOperand::REGISTER, rep, + move_cycle_.scratch_reg->code()); AssembleMove(&scratch, dest); } else { - RiscvOperandConverter g(this, nullptr); - int new_slots = ElementSizeInPointers(rep); - frame_access_state()->IncreaseSPDelta(-new_slots); - if (dest->IsRegister()) { - __ Pop(g.ToRegister(dest)); -#if V8_TARGET_ARCH_RISCV64 - } else if (dest->IsStackSlot() || dest->IsFloatStackSlot() || - dest->IsDoubleStackSlot()) { -#elif V8_TARGET_ARCH_RISCV32 - } else if (dest->IsStackSlot() || dest->IsFloatStackSlot()) { -#endif - __ Pop(kScratchReg); - __ StoreWord(kScratchReg, g.ToMemOperand(dest)); - } else { - int last_frame_slot_id = - frame_access_state_->frame()->GetTotalFrameSlotCount() - 1; - int sp_delta = frame_access_state_->sp_delta(); - int temp_slot = last_frame_slot_id + sp_delta + new_slots; - AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot); - AssembleMove(&temp, dest); - __ 
AddWord(sp, sp, Operand(new_slots * kSystemPointerSize)); - } + Pop(dest, rep); } + // Restore the default state to release the {UseScratchRegisterScope} and to + // prepare for the next cycle. move_cycle_ = MoveCycleState(); } void CodeGenerator::SetPendingMove(MoveOperands* move) { - MoveType::Type move_type = - MoveType::InferMove(&move->source(), &move->destination()); - if (move_type == MoveType::kConstantToStack) { + InstructionOperand* src = &move->source(); + InstructionOperand* dst = &move->destination(); + UseScratchRegisterScope temps(tasm()); + if (src->IsConstant() && dst->IsFPLocationOperand()) { + Register temp = temps.Acquire(); + move_cycle_.scratch_regs.set(temp); + } else if (src->IsAnyStackSlot() || dst->IsAnyStackSlot()) { RiscvOperandConverter g(this, nullptr); - Constant src = g.ToConstant(&move->source()); - if (move->destination().IsStackSlot() && - (RelocInfo::IsWasmReference(src.rmode()) || - (src.type() != Constant::kInt32 && src.type() != Constant::kInt64))) { - move_cycle_.pending_scratch_register_use = true; + bool src_need_scratch = false; + bool dst_need_scratch = false; + if (src->IsAnyStackSlot()) { + MemOperand src_mem = g.ToMemOperand(src); + src_need_scratch = + (!is_int16(src_mem.offset())) || (((src_mem.offset() & 0b111) != 0) && + !is_int16(src_mem.offset() + 4)); } - } else if (move_type == MoveType::kStackToStack) { - if (move->source().IsFPLocationOperand()) { - move_cycle_.pending_double_scratch_register_use = true; - } else { - move_cycle_.pending_scratch_register_use = true; + if (dst->IsAnyStackSlot()) { + MemOperand dst_mem = g.ToMemOperand(dst); + dst_need_scratch = + (!is_int16(dst_mem.offset())) || (((dst_mem.offset() & 0b111) != 0) && + !is_int16(dst_mem.offset() + 4)); + } + if (src_need_scratch || dst_need_scratch) { + Register temp = temps.Acquire(); + move_cycle_.scratch_regs.set(temp); } } } From f23bf2752dc3935171479194d8b19ae0425183c4 Mon Sep 17 00:00:00 2001 From: Darius M Date: Wed, 28 Dec 2022 18:32:03 
+0100 Subject: [PATCH 084/654] [maglev][arm64] Fix wrong write barrier usage The arm64 version of GeneratorStore was using the write barrier incorrectly, triggering it when it shouldn't be triggered (and vise-versa), and a "PointersFromHere" was mistakenly used instead of a "PointersToHere". The reason for the incorrect ne/eq used in CheckFlags is that this function works a bit differently on x64 and arm64, cf their implementations: - x64: https://source.chromium.org/chromium/chromium/src/+/main:v8/src/codegen/x64/macro-assembler-x64.cc;l=3425;drc=605e46479aca3449a6ba1350a1de7927c76b86ad - arm64: https://source.chromium.org/chromium/chromium/src/+/main:v8/src/codegen/arm64/macro-assembler-arm64.cc;l=3248;drc=dc950c32bd5262d66d845d2bfeb1ff4a17a857bc For an example of both of those functions used for similar purposes, see `MacroAssembler::RecordWrite` in macro-assembler-x64.h and macro-assembler-arm64.h: the former uses `zero` in `CheckFlags`, while the latter uses `ne`. When --stress-maglev and --verify-heap were enabled, this mistake was causing a crash in the heap verifier in the `mjsunit/es6/typedarray-from.js` and `mjsunit/wasm/gc-js-interop.js` benchmarks. I was able to reproduce those crashes on x64 by replacing the "not_equal" in CheckFlags by "equal". 
Bug: v8:7700 Change-Id: I42316931fba858433317238fc42f7c33985f46ca Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128556 Reviewed-by: Jakob Linke Commit-Queue: Darius Mercadier Cr-Commit-Position: refs/heads/main@{#85035} --- src/maglev/arm64/maglev-ir-arm64.cc | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index 2eab7d5e46..9ca21ecbdb 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -1387,7 +1387,7 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm, ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path"); __ CheckPageFlag( value, - MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, eq, + MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, ne, *done); Register slot_reg = WriteBarrierDescriptor::SlotAddressRegister(); @@ -1413,7 +1413,7 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm, // Consider hoisting the check out of the loop and duplicating the loop into // with and without write barrier. __ CheckPageFlag(array, MemoryChunk::kPointersFromHereAreInterestingMask, - ne, &deferred_write_barrier->deferred_code_label); + eq, &deferred_write_barrier->deferred_code_label); __ bind(*done); } @@ -1431,9 +1431,10 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm, // TODO(leszeks): The context is almost always going to be in // old-space, consider moving this check to the fast path, maybe even // as the first bailout. 
- __ CheckPageFlag(context, - MemoryChunk::kPointersFromHereAreInterestingMask, eq, - *done); + __ CheckPageFlag( + context, + MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, ne, + *done); __ Move(WriteBarrierDescriptor::ObjectRegister(), generator); generator = WriteBarrierDescriptor::ObjectRegister(); @@ -1458,7 +1459,7 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm, context, FieldMemOperand(generator, JSGeneratorObject::kContextOffset)); __ AssertNotSmi(context); __ CheckPageFlag(generator, MemoryChunk::kPointersFromHereAreInterestingMask, - ne, &deferred_context_write_barrier->deferred_code_label); + eq, &deferred_context_write_barrier->deferred_code_label); __ bind(*done); UseScratchRegisterScope temps(masm); From b2ab857f39ee23e6d0381471ad769f38a397decd Mon Sep 17 00:00:00 2001 From: Jakob Linke Date: Thu, 29 Dec 2022 09:29:14 +0100 Subject: [PATCH 085/654] Properly print minus zero (-0.0) heap numbers .. print them as '-0.0' instead of '0.0'. Change-Id: I425d78e245868e7ff878c07282a0f9d8ca67a8d1 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128558 Reviewed-by: Darius Mercadier Commit-Queue: Darius Mercadier Auto-Submit: Jakob Linke Cr-Commit-Position: refs/heads/main@{#85036} --- src/diagnostics/objects-printer.cc | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/diagnostics/objects-printer.cc b/src/diagnostics/objects-printer.cc index b763468f58..589f6ae2c4 100644 --- a/src/diagnostics/objects-printer.cc +++ b/src/diagnostics/objects-printer.cc @@ -2661,9 +2661,13 @@ void HeapNumber::HeapNumberShortPrint(std::ostream& os) { static constexpr int64_t kMaxSafeInteger = -(kMinSafeInteger + 1); double val = value(); - if (val == DoubleToInteger(val) && - val >= static_cast(kMinSafeInteger) && - val <= static_cast(kMaxSafeInteger)) { + if (i::IsMinusZero(val)) { + os << "-0.0"; + } else if (val == DoubleToInteger(val) && + val >= static_cast(kMinSafeInteger) && + val <= 
static_cast(kMaxSafeInteger)) { + // Print integer HeapNumbers in safe integer range with max precision: as + // 9007199254740991.0 instead of 9.0072e+15 int64_t i = static_cast(val); os << i << ".0"; } else { From 4bbbb521f4267d0f8ec6edd07be595eed82dac9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olivier=20Fl=C3=BCckiger?= Date: Fri, 23 Dec 2022 13:34:36 +0000 Subject: [PATCH 086/654] Reland "Reland "[static-roots] Enable static roots on supported configurations"" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a reland of commit b247270178dcfffe9af4389dbb84d1643bfccea4 But with static roots disabled on non-external code space builds. Original change's description: > Reland "[static-roots] Enable static roots on supported configurations" > > This is a reland of commit c04ca9cc63417d24455704cbee44eb60b79f7af2 > > Original change's description: > > [static-roots] Enable static roots on supported configurations > > > > The static root values are not actually used yet. 
> > > > Bug: v8:13466 > > Change-Id: I85fc99277c31e0dd4350a305040ab25456051046 > > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4101880 > > Reviewed-by: Toon Verwaest > > Commit-Queue: Olivier Flückiger > > Cr-Commit-Position: refs/heads/main@{#84850} > > Bug: v8:13466 > Change-Id: Id65bb5b19df999dfe930a78993e4bf3343d9f996 > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4111641 > Auto-Submit: Olivier Flückiger > Reviewed-by: Toon Verwaest > Commit-Queue: Toon Verwaest > Cr-Commit-Position: refs/heads/main@{#84991} Bug: v8:13466 Change-Id: Id1f55c1cf8d349338fd49f6cb0ed7dc2e1054a72 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4123534 Reviewed-by: Toon Verwaest Commit-Queue: Olivier Flückiger Cr-Commit-Position: refs/heads/main@{#85037} --- BUILD.gn | 13 +- src/heap/read-only-spaces.cc | 4 + src/roots/static-roots.h | 753 ++++++++++++++++++++++++++++++- src/snapshot/static-roots-gen.cc | 3 + tools/v8heapconst.py | 474 +++++++++---------- 5 files changed, 1004 insertions(+), 243 deletions(-) diff --git a/BUILD.gn b/BUILD.gn index 26adeb7be9..0620dc16ab 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -121,7 +121,7 @@ declare_args() { v8_enable_snapshot_native_code_counters = "" # Use pre-generated static root pointer values from static-roots.h. - v8_enable_static_roots = false + v8_enable_static_roots = "" # Enable code-generation-time checking of types in the CodeStubAssembler. v8_enable_verify_csa = false @@ -540,11 +540,16 @@ if (v8_enable_sandbox == "") { if (v8_enable_static_roots == "") { # Static roots are only valid for builds with pointer compression and a - # shared ro heap. Also, non-wasm and non-i18n builds have fewer read-only - # roots. + # shared read-only heap. + # TODO(olivf, v8:13466) Some configurations could be supported if we + # introduce different static root files for different build configurations: + # Non-wasm and non-i18n builds have fewer read only roots. 
Configurations + # without external code space allocate read only roots at a further + # location relative to the cage base. v8_enable_static_roots = v8_enable_pointer_compression && v8_enable_shared_ro_heap && - v8_enable_pointer_compression_shared_cage && v8_enable_webassembly && + v8_enable_pointer_compression_shared_cage && + v8_enable_external_code_space && v8_enable_webassembly && v8_enable_i18n_support } diff --git a/src/heap/read-only-spaces.cc b/src/heap/read-only-spaces.cc index 986fec59be..df277b3782 100644 --- a/src/heap/read-only-spaces.cc +++ b/src/heap/read-only-spaces.cc @@ -561,6 +561,10 @@ void ReadOnlySpace::FreeLinearAllocationArea() { void ReadOnlySpace::EnsurePage() { if (pages_.empty()) EnsureSpaceForAllocation(1); CHECK(!pages_.empty()); + // For all configurations where static roots are supported the read only roots + // are currently allocated in the first page of the cage. + CHECK_IMPLIES(V8_STATIC_ROOTS_BOOL, + heap_->isolate()->cage_base() == pages_.back()->address()); } void ReadOnlySpace::EnsureSpaceForAllocation(int size_in_bytes) { diff --git a/src/roots/static-roots.h b/src/roots/static-roots.h index eb4aebd879..eeced43597 100644 --- a/src/roots/static-roots.h +++ b/src/roots/static-roots.h @@ -2,17 +2,766 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +// This file is automatically generated by `tools/dev/gen-static-roots.py`. Do +// not edit manually. + #ifndef V8_ROOTS_STATIC_ROOTS_H_ #define V8_ROOTS_STATIC_ROOTS_H_ #include "src/common/globals.h" + #if V8_STATIC_ROOTS_BOOL +// Disabling Wasm or Intl invalidates the contents of static-roots.h. +// TODO(olivf): To support static roots for multiple build configurations we +// will need to generate target specific versions of this file. 
+static_assert(V8_ENABLE_WEBASSEMBLY); +static_assert(V8_INTL_SUPPORT); + namespace v8 { namespace internal { -// TODO(olivf, v8:13466): Enable and add static roots -constexpr static std::array StaticReadOnlyRootsPointerTable = {}; +constexpr static std::array StaticReadOnlyRootsPointerTable = { + 0x3235, // free_space_map + 0x38a5, // one_pointer_filler_map + 0x38cd, // two_pointer_filler_map + 0x7b19, // uninitialized_value + 0x22e1, // undefined_value + 0x22fd, // the_hole_value + 0x22c5, // null_value + 0x3f41, // true_value + 0x3f5d, // false_value + 0x543d, // empty_string + 0x2141, // meta_map + 0x31e5, // byte_array_map + 0x2169, // fixed_array_map + 0x21e1, // fixed_cow_array_map + 0x3995, // hash_table_map + 0x2b7d, // symbol_map + 0x2dad, // one_byte_string_map + 0x2f3d, // one_byte_internalized_string_map + 0x2ab5, // scope_info_map + 0x3d05, // shared_function_info_map + 0x3825, // code_map + 0x384d, // cell_map + 0x387d, // global_property_cell_map + 0x2ba5, // foreign_map + 0x2b55, // heap_number_map + 0x396d, // transition_array_map + 0x302d, // thin_one_byte_string_map + 0x2b2d, // feedback_vector_map + 0x3f19, // empty_scope_info + 0x22a9, // empty_fixed_array + 0x2aa5, // empty_descriptor_array + 0x7b51, // arguments_marker + 0x7bc9, // exception + 0x7b89, // termination_exception + 0x7be5, // optimized_out + 0x7c1d, // stale_register + 0x3bc5, // script_context_table_map + 0x2b05, // closure_feedback_cell_array_map + 0x31bd, // feedback_metadata_map + 0x3b9d, // array_list_map + 0x2d5d, // bigint_map + 0x3bed, // object_boilerplate_description_map + 0x320d, // bytecode_array_map + 0x3d7d, // code_data_container_map + 0x3c15, // coverage_info_map + 0x3195, // fixed_double_array_map + 0x3a85, // global_dictionary_map + 0x3945, // many_closures_cell_map + 0x2bcd, // mega_dom_handler_map + 0x2add, // module_info_map + 0x3a35, // name_dictionary_map + 0x38f5, // no_closures_cell_map + 0x3aad, // number_dictionary_map + 0x391d, // one_closure_cell_map 
+ 0x39bd, // ordered_hash_map_map + 0x39e5, // ordered_hash_set_map + 0x3afd, // name_to_index_hash_table_map + 0x3b25, // registered_symbol_table_map + 0x3a0d, // ordered_name_dictionary_map + 0x3cdd, // preparse_data_map + 0x325d, // property_array_map + 0x3c3d, // accessor_info_map + 0x3c65, // side_effect_call_handler_info_map + 0x3c8d, // side_effect_free_call_handler_info_map + 0x3cb5, // next_call_side_effect_free_call_handler_info_map + 0x3ad5, // simple_number_dictionary_map + 0x3285, // small_ordered_hash_map_map + 0x32ad, // small_ordered_hash_set_map + 0x32d5, // small_ordered_name_dictionary_map + 0x3d2d, // source_text_module_map + 0x3a5d, // swiss_name_dictionary_map + 0x3d55, // synthetic_module_map + 0x3da5, // wasm_api_function_ref_map + 0x3dcd, // wasm_capi_function_data_map + 0x3df5, // wasm_exported_function_data_map + 0x3e1d, // wasm_internal_function_map + 0x3e45, // wasm_js_function_data_map + 0x3e6d, // wasm_resume_data_map + 0x3e95, // wasm_type_info_map + 0x3ebd, // wasm_continuation_object_map + 0x2191, // weak_fixed_array_map + 0x21b9, // weak_array_list_map + 0x3b75, // ephemeron_hash_table_map + 0x3b4d, // embedder_data_array_map + 0x3ee5, // weak_cell_map + 0x2d85, // string_map + 0x2dfd, // cons_one_byte_string_map + 0x2dd5, // cons_string_map + 0x3005, // thin_string_map + 0x2e25, // sliced_string_map + 0x2e4d, // sliced_one_byte_string_map + 0x2e75, // external_string_map + 0x2e9d, // external_one_byte_string_map + 0x2ec5, // uncached_external_string_map + 0x2f15, // internalized_string_map + 0x2f65, // external_internalized_string_map + 0x2f8d, // external_one_byte_internalized_string_map + 0x2fb5, // uncached_external_internalized_string_map + 0x2fdd, // uncached_external_one_byte_internalized_string_map + 0x2eed, // uncached_external_one_byte_string_map + 0x307d, // shared_one_byte_string_map + 0x3055, // shared_string_map + 0x30cd, // shared_external_one_byte_string_map + 0x30a5, // shared_external_string_map + 0x311d, // 
shared_uncached_external_one_byte_string_map + 0x30f5, // shared_uncached_external_string_map + 0x316d, // shared_thin_one_byte_string_map + 0x3145, // shared_thin_string_map + 0x2231, // undefined_map + 0x2281, // the_hole_map + 0x2259, // null_map + 0x2bf5, // boolean_map + 0x2c1d, // uninitialized_map + 0x2c45, // arguments_marker_map + 0x2c6d, // exception_map + 0x2c95, // termination_exception_map + 0x2cbd, // optimized_out_map + 0x2ce5, // stale_register_map + 0x2d0d, // self_reference_marker_map + 0x2d35, // basic_block_counters_marker_map + 0x2a99, // empty_enum_cache + 0x3f81, // empty_property_array + 0x3f79, // empty_byte_array + 0x3f29, // empty_object_boilerplate_description + 0x3f35, // empty_array_boilerplate_description + 0x3f89, // empty_closure_feedback_cell_array + 0x820d, // empty_slow_element_dictionary + 0x8231, // empty_ordered_hash_map + 0x8245, // empty_ordered_hash_set + 0x829d, // empty_feedback_metadata + 0x81c9, // empty_property_dictionary + 0x8259, // empty_ordered_property_dictionary + 0x827d, // empty_swiss_property_dictionary + 0x3f91, // noop_interceptor_info + 0x3f0d, // empty_array_list + 0x22b1, // empty_weak_fixed_array + 0x22b9, // empty_weak_array_list + 0x3875, // invalid_prototype_validity_cell + 0x3fc5, // nan_value + 0x3fd1, // hole_nan_value + 0x3fdd, // infinity_value + 0x3fb9, // minus_zero_value + 0x3fe9, // minus_infinity_value + 0x3ff5, // max_safe_integer + 0x4001, // max_uint_32 + 0x400d, // smi_min_value + 0x4019, // smi_max_value_plus_one + 0x4035, // single_character_string_table + 0x7c55, // self_reference_marker + 0x7c95, // basic_block_counters_marker + 0x831d, // off_heap_trampoline_relocation_info + 0x22e1, // trampoline_trivial_code_data_container + 0x22e1, // trampoline_promise_rejection_code_data_container + 0x82a9, // global_this_binding_scope_info + 0x82c9, // empty_function_scope_info + 0x82ed, // native_scope_info + 0x8305, // shadow_realm_scope_info + 0x81f1, // empty_symbol_table + 0x4025, // 
hash_seed + 0x5449, // adoptText_string + 0x5461, // approximatelySign_string + 0x5481, // baseName_string + 0x5495, // accounting_string + 0x54ad, // breakType_string + 0x54c5, // calendars_string + 0x54dd, // cardinal_string + 0x54f1, // caseFirst_string + 0x5509, // ceil_string + 0x5519, // compare_string + 0x552d, // collation_string + 0x5545, // collations_string + 0x555d, // compact_string + 0x5571, // compactDisplay_string + 0x558d, // currency_string + 0x55a1, // currencyDisplay_string + 0x55bd, // currencySign_string + 0x55d5, // dateStyle_string + 0x55ed, // dateTimeField_string + 0x5609, // dayPeriod_string + 0x5621, // daysDisplay_string + 0x5639, // decimal_string + 0x564d, // dialect_string + 0x5661, // digital_string + 0x5675, // direction_string + 0x568d, // endRange_string + 0x56a1, // engineering_string + 0x56b9, // exceptZero_string + 0x56d1, // expand_string + 0x56e5, // exponentInteger_string + 0x5701, // exponentMinusSign_string + 0x5721, // exponentSeparator_string + 0x5741, // fallback_string + 0x5755, // first_string + 0x5769, // firstDay_string + 0x577d, // floor_string + 0x5791, // format_string + 0x57a5, // fraction_string + 0x57b9, // fractionalDigits_string + 0x57d5, // fractionalSecond_string + 0x57f1, // full_string + 0x5801, // granularity_string + 0x5819, // grapheme_string + 0x582d, // group_string + 0x5841, // h11_string + 0x5851, // h12_string + 0x5861, // h23_string + 0x5871, // h24_string + 0x5881, // halfCeil_string + 0x5895, // halfEven_string + 0x58a9, // halfExpand_string + 0x58c1, // halfFloor_string + 0x58d9, // halfTrunc_string + 0x58f1, // hour12_string + 0x5905, // hourCycle_string + 0x591d, // hourCycles_string + 0x5935, // hoursDisplay_string + 0x594d, // ideo_string + 0x595d, // ignorePunctuation_string + 0x597d, // Invalid_Date_string + 0x5995, // integer_string + 0x59a9, // isWordLike_string + 0x59c1, // kana_string + 0x59d1, // language_string + 0x59e5, // languageDisplay_string + 0x5a01, // lessPrecision_string 
+ 0x5a1d, // letter_string + 0x5a31, // list_string + 0x5a41, // literal_string + 0x5a55, // locale_string + 0x5a69, // loose_string + 0x5a7d, // lower_string + 0x5a91, // ltr_string + 0x5aa1, // maximumFractionDigits_string + 0x5ac5, // maximumSignificantDigits_string + 0x5ae9, // microsecondsDisplay_string + 0x5b09, // millisecondsDisplay_string + 0x5b29, // min2_string + 0x5b39, // minimalDays_string + 0x5b51, // minimumFractionDigits_string + 0x5b75, // minimumIntegerDigits_string + 0x5b95, // minimumSignificantDigits_string + 0x5bb9, // minus_0 + 0x5bc9, // minusSign_string + 0x5be1, // minutesDisplay_string + 0x5bfd, // monthsDisplay_string + 0x5c19, // morePrecision_string + 0x5c35, // nan_string + 0x5c45, // nanosecondsDisplay_string + 0x5c65, // narrowSymbol_string + 0x5c7d, // negative_string + 0x5c91, // never_string + 0x5ca5, // none_string + 0x5cb5, // notation_string + 0x5cc9, // normal_string + 0x5cdd, // numberingSystem_string + 0x5cf9, // numberingSystems_string + 0x5d15, // numeric_string + 0x5d29, // ordinal_string + 0x5d3d, // percentSign_string + 0x5d55, // plusSign_string + 0x5d69, // quarter_string + 0x5d7d, // region_string + 0x5d91, // relatedYear_string + 0x5da9, // roundingMode_string + 0x5dc1, // roundingPriority_string + 0x5ddd, // rtl_string + 0x5ded, // scientific_string + 0x5e05, // secondsDisplay_string + 0x5e21, // segment_string + 0x5e35, // SegmentIterator_string + 0x5e51, // Segments_string + 0x5e65, // sensitivity_string + 0x5e7d, // sep_string + 0x5e8d, // shared_string + 0x5ea1, // signDisplay_string + 0x5eb9, // standard_string + 0x5ecd, // startRange_string + 0x5ee5, // strict_string + 0x5ef9, // stripIfInteger_string + 0x5f15, // style_string + 0x5f29, // term_string + 0x5f39, // textInfo_string + 0x5f4d, // timeStyle_string + 0x5f65, // timeZones_string + 0x5f7d, // timeZoneName_string + 0x5f95, // trailingZeroDisplay_string + 0x5fb5, // trunc_string + 0x5fc9, // two_digit_string + 0x5fdd, // type_string + 0x5fed, // 
unknown_string + 0x6001, // upper_string + 0x6015, // usage_string + 0x6029, // useGrouping_string + 0x6041, // unitDisplay_string + 0x6059, // weekday_string + 0x606d, // weekend_string + 0x6081, // weeksDisplay_string + 0x6099, // weekInfo_string + 0x60ad, // yearName_string + 0x60c1, // yearsDisplay_string + 0x60d9, // add_string + 0x60e9, // AggregateError_string + 0x6105, // always_string + 0x6119, // anonymous_function_string + 0x6139, // anonymous_string + 0x6151, // apply_string + 0x6165, // Arguments_string + 0x617d, // arguments_string + 0x6195, // arguments_to_string + 0x61b5, // Array_string + 0x61c9, // array_to_string + 0x61e5, // ArrayBuffer_string + 0x61fd, // ArrayIterator_string + 0x6219, // as_string + 0x6229, // assert_string + 0x623d, // async_string + 0x6251, // AtomicsCondition_string + 0x6271, // AtomicsMutex_string + 0x628d, // auto_string + 0x629d, // await_string + 0x62b1, // BigInt_string + 0x62c5, // bigint_string + 0x62d9, // BigInt64Array_string + 0x62f5, // BigUint64Array_string + 0x6311, // bind_string + 0x6321, // blank_string + 0x6335, // Boolean_string + 0x6349, // boolean_string + 0x635d, // boolean_to_string + 0x6379, // bound__string + 0x638d, // buffer_string + 0x63a1, // byte_length_string + 0x63b9, // byte_offset_string + 0x63d1, // CompileError_string + 0x63e9, // calendar_string + 0x63fd, // callee_string + 0x6411, // caller_string + 0x6425, // cause_string + 0x6439, // character_string + 0x6451, // closure_string + 0x6469, // code_string + 0x6479, // column_string + 0x648d, // computed_string + 0x64a5, // configurable_string + 0x64bd, // conjunction_string + 0x64d5, // console_string + 0x64e9, // constrain_string + 0x6501, // construct_string + 0x6519, // current_string + 0x652d, // Date_string + 0x653d, // date_to_string + 0x6559, // dateAdd_string + 0x656d, // dateFromFields_string + 0x6589, // dateUntil_string + 0x65a1, // day_string + 0x65b1, // dayOfWeek_string + 0x65c9, // dayOfYear_string + 0x65e1, // days_string 
+ 0x65f1, // daysInMonth_string + 0x6609, // daysInWeek_string + 0x6621, // daysInYear_string + 0x6639, // default_string + 0x664d, // defineProperty_string + 0x6669, // deleteProperty_string + 0x6685, // disjunction_string + 0x669d, // done_string + 0x66ad, // dot_brand_string + 0x66c1, // dot_catch_string + 0x66d5, // dot_default_string + 0x66e9, // dot_for_string + 0x66f9, // dot_generator_object_string + 0x6719, // dot_home_object_string + 0x6731, // dot_new_target_string + 0x6749, // dot_result_string + 0x675d, // dot_repl_result_string + 0x6775, // dot_static_home_object_string + 0x471d, // dot_string + 0x6795, // dot_switch_tag_string + 0x67ad, // dotAll_string + 0x67c1, // Error_string + 0x67d5, // EvalError_string + 0x67ed, // enumerable_string + 0x6805, // element_string + 0x6819, // epochMicroseconds_string + 0x6839, // epochMilliseconds_string + 0x6859, // epochNanoseconds_string + 0x6875, // epochSeconds_string + 0x688d, // era_string + 0x689d, // eraYear_string + 0x68b1, // errors_string + 0x68c5, // error_to_string + 0x68e1, // eval_string + 0x68f1, // exception_string + 0x6909, // exec_string + 0x6919, // false_string + 0x692d, // fields_string + 0x6941, // FinalizationRegistry_string + 0x6961, // flags_string + 0x6975, // Float32Array_string + 0x698d, // Float64Array_string + 0x69a5, // fractionalSecondDigits_string + 0x69c9, // from_string + 0x69d9, // Function_string + 0x69ed, // function_native_code_string + 0x6a19, // function_string + 0x6a2d, // function_to_string + 0x6a4d, // Generator_string + 0x6a65, // get_space_string + 0x6a75, // get_string + 0x6a85, // getOffsetNanosecondsFor_string + 0x6aa9, // getOwnPropertyDescriptor_string + 0x6acd, // getPossibleInstantsFor_string + 0x6af1, // getPrototypeOf_string + 0x6b0d, // global_string + 0x6b21, // globalThis_string + 0x6b39, // groups_string + 0x6b4d, // growable_string + 0x6b61, // has_string + 0x6b71, // hasIndices_string + 0x6b89, // hour_string + 0x6b99, // hours_string + 0x6bad, // 
hoursInDay_string + 0x6bc5, // ignoreCase_string + 0x6bdd, // id_string + 0x6bed, // illegal_access_string + 0x6c09, // illegal_argument_string + 0x6c25, // inLeapYear_string + 0x6c3d, // index_string + 0x6c51, // indices_string + 0x6c65, // Infinity_string + 0x6c79, // infinity_string + 0x6c8d, // input_string + 0x6ca1, // Int16Array_string + 0x6cb9, // Int32Array_string + 0x6cd1, // Int8Array_string + 0x6ce9, // isExtensible_string + 0x6d01, // iso8601_string + 0x6d15, // isoDay_string + 0x6d29, // isoHour_string + 0x6d3d, // isoMicrosecond_string + 0x6d59, // isoMillisecond_string + 0x6d75, // isoMinute_string + 0x6d8d, // isoMonth_string + 0x6da1, // isoNanosecond_string + 0x6dbd, // isoSecond_string + 0x6dd5, // isoYear_string + 0x6de9, // jsMemoryEstimate_string + 0x6e05, // jsMemoryRange_string + 0x6e21, // keys_string + 0x6e31, // largestUnit_string + 0x6e49, // lastIndex_string + 0x6e61, // length_string + 0x6e75, // let_string + 0x6e85, // line_string + 0x6e95, // linear_string + 0x6ea9, // LinkError_string + 0x6ec1, // long_string + 0x6ed1, // Map_string + 0x6ee1, // MapIterator_string + 0x6ef9, // max_byte_length_string + 0x6f15, // medium_string + 0x6f29, // mergeFields_string + 0x6f41, // message_string + 0x6f55, // meta_string + 0x6f65, // minus_Infinity_string + 0x6f7d, // microsecond_string + 0x6f95, // microseconds_string + 0x6fad, // millisecond_string + 0x6fc5, // milliseconds_string + 0x6fdd, // minute_string + 0x6ff1, // minutes_string + 0x7005, // Module_string + 0x7019, // month_string + 0x702d, // monthDayFromFields_string + 0x704d, // months_string + 0x7061, // monthsInYear_string + 0x7079, // monthCode_string + 0x7091, // multiline_string + 0x70a9, // name_string + 0x70b9, // NaN_string + 0x70c9, // nanosecond_string + 0x70e1, // nanoseconds_string + 0x70f9, // narrow_string + 0x710d, // native_string + 0x6731, // new_target_string + 0x7121, // NFC_string + 0x7131, // NFD_string + 0x7141, // NFKC_string + 0x7151, // NFKD_string + 0x7161, 
// not_equal_string + 0x7179, // null_string + 0x7189, // null_to_string + 0x71a5, // Number_string + 0x71b9, // number_string + 0x71cd, // number_to_string + 0x71e9, // Object_string + 0x71fd, // object_string + 0x7211, // object_to_string + 0x722d, // of_string + 0x723d, // offset_string + 0x7251, // offsetNanoseconds_string + 0x7271, // ok_string + 0x474d, // one_string + 0x7281, // other_string + 0x7295, // overflow_string + 0x72a9, // ownKeys_string + 0x72bd, // percent_string + 0x72d1, // plainDate_string + 0x72e9, // plainTime_string + 0x7301, // position_string + 0x7315, // preventExtensions_string + 0x7335, // private_constructor_string + 0x734d, // Promise_string + 0x7361, // proto_string + 0x7379, // prototype_string + 0x7391, // proxy_string + 0x73a5, // Proxy_string + 0x73b9, // query_colon_string + 0x73c9, // RangeError_string + 0x73e1, // raw_json_string + 0x73f5, // raw_string + 0x7405, // ReferenceError_string + 0x7421, // ReflectGet_string + 0x7439, // ReflectHas_string + 0x7451, // RegExp_string + 0x7465, // regexp_to_string + 0x7481, // reject_string + 0x7495, // relativeTo_string + 0x74ad, // resizable_string + 0x74c5, // ResizableArrayBuffer_string + 0x74e5, // return_string + 0x74f9, // revoke_string + 0x750d, // roundingIncrement_string + 0x752d, // RuntimeError_string + 0x7545, // WebAssemblyException_string + 0x7569, // Script_string + 0x757d, // script_string + 0x7591, // second_string + 0x75a5, // seconds_string + 0x75b9, // short_string + 0x75cd, // Set_string + 0x75dd, // sentence_string + 0x75f1, // set_space_string + 0x7601, // set_string + 0x7611, // SetIterator_string + 0x7629, // setPrototypeOf_string + 0x7645, // ShadowRealm_string + 0x765d, // SharedArray_string + 0x7675, // SharedArrayBuffer_string + 0x7695, // SharedStruct_string + 0x76ad, // sign_string + 0x76bd, // smallestUnit_string + 0x76d5, // source_string + 0x76e9, // sourceText_string + 0x7701, // stack_string + 0x7715, // stackTraceLimit_string + 0x7731, // 
sticky_string + 0x7745, // String_string + 0x7759, // string_string + 0x776d, // string_to_string + 0x7789, // Symbol_iterator_string + 0x77a5, // symbol_species_string + 0x77c1, // Symbol_species_string + 0x77dd, // Symbol_string + 0x77f1, // symbol_string + 0x7805, // SyntaxError_string + 0x781d, // target_string + 0x7831, // this_function_string + 0x784d, // this_string + 0x785d, // throw_string + 0x7871, // timed_out_string + 0x7889, // timeZone_string + 0x789d, // toJSON_string + 0x78b1, // toString_string + 0x78c5, // true_string + 0x78d5, // total_string + 0x78e9, // TypeError_string + 0x7901, // Uint16Array_string + 0x7919, // Uint32Array_string + 0x7931, // Uint8Array_string + 0x7949, // Uint8ClampedArray_string + 0x7969, // undefined_string + 0x7981, // undefined_to_string + 0x79a1, // unicode_string + 0x79b5, // unicodeSets_string + 0x79cd, // unit_string + 0x79dd, // URIError_string + 0x79f1, // UTC_string + 0x7a01, // value_string + 0x7a15, // valueOf_string + 0x7a29, // WeakMap_string + 0x7a3d, // WeakRef_string + 0x7a51, // WeakSet_string + 0x7a65, // week_string + 0x7a75, // weeks_string + 0x7a89, // weekOfYear_string + 0x7aa1, // word_string + 0x7ab1, // writable_string + 0x7ac5, // yearMonthFromFields_string + 0x7ae5, // year_string + 0x7af5, // years_string + 0x473d, // zero_string + 0x7cd9, // array_buffer_wasm_memory_symbol + 0x7ce9, // call_site_info_symbol + 0x7cf9, // console_context_id_symbol + 0x7d09, // console_context_name_symbol + 0x7d19, // class_fields_symbol + 0x7d29, // class_positions_symbol + 0x7d39, // elements_transition_symbol + 0x7d49, // error_end_pos_symbol + 0x7d59, // error_script_symbol + 0x7d69, // error_stack_symbol + 0x7d79, // error_start_pos_symbol + 0x7d89, // frozen_symbol + 0x7d99, // interpreter_trampoline_symbol + 0x7da9, // mega_dom_symbol + 0x7db9, // megamorphic_symbol + 0x7dc9, // native_context_index_symbol + 0x7dd9, // nonextensible_symbol + 0x7de9, // not_mapped_symbol + 0x7df9, // 
promise_debug_marker_symbol + 0x7e09, // promise_debug_message_symbol + 0x7e19, // promise_forwarding_handler_symbol + 0x7e29, // promise_handled_by_symbol + 0x7e39, // promise_awaited_by_symbol + 0x7e49, // regexp_result_names_symbol + 0x7e59, // regexp_result_regexp_input_symbol + 0x7e69, // regexp_result_regexp_last_index_symbol + 0x7e79, // sealed_symbol + 0x7e89, // strict_function_transition_symbol + 0x7e99, // template_literal_function_literal_id_symbol + 0x7ea9, // template_literal_slot_id_symbol + 0x7eb9, // wasm_exception_tag_symbol + 0x7ec9, // wasm_exception_values_symbol + 0x7ed9, // wasm_uncatchable_symbol + 0x7ee9, // wasm_wrapped_object_symbol + 0x7ef9, // wasm_debug_proxy_cache_symbol + 0x7f09, // wasm_debug_proxy_names_symbol + 0x7f19, // uninitialized_symbol + 0x7f29, // async_iterator_symbol + 0x7f59, // intl_fallback_symbol + 0x7f91, // match_all_symbol + 0x7fbd, // match_symbol + 0x7fe5, // replace_symbol + 0x8011, // search_symbol + 0x803d, // split_symbol + 0x8065, // to_primitive_symbol + 0x8095, // unscopables_symbol + 0x80c5, // has_instance_symbol + 0x80f5, // to_string_tag_symbol + 0x2319, // promise_fulfill_reaction_job_task_map + 0x2341, // promise_reject_reaction_job_task_map + 0x2369, // callable_task_map + 0x2391, // callback_task_map + 0x23b9, // promise_resolve_thenable_job_task_map + 0x23e1, // function_template_info_map + 0x2409, // object_template_info_map + 0x2431, // access_check_info_map + 0x2459, // accessor_pair_map + 0x2481, // aliased_arguments_entry_map + 0x24a9, // allocation_memento_map + 0x24d1, // array_boilerplate_description_map + 0x24f9, // asm_wasm_data_map + 0x2521, // async_generator_request_map + 0x2549, // break_point_map + 0x2571, // break_point_info_map + 0x2599, // call_site_info_map + 0x25c1, // class_positions_map + 0x25e9, // debug_info_map + 0x2611, // enum_cache_map + 0x2639, // error_stack_data_map + 0x2661, // function_template_rare_data_map + 0x2689, // interceptor_info_map + 0x26b1, // 
interpreter_data_map + 0x26d9, // module_request_map + 0x2701, // promise_capability_map + 0x2729, // promise_on_stack_map + 0x2751, // promise_reaction_map + 0x2779, // property_descriptor_object_map + 0x27a1, // prototype_info_map + 0x27c9, // regexp_boilerplate_description_map + 0x27f1, // script_map + 0x2819, // script_or_module_map + 0x2841, // module_info_entry_map + 0x2869, // stack_frame_info_map + 0x2891, // template_object_description_map + 0x28b9, // tuple2_map + 0x28e1, // wasm_exception_tag_map + 0x2909, // wasm_indirect_function_table_map + 0x370d, // sloppy_arguments_elements_map + 0x2209, // descriptor_array_map + 0x3735, // strong_descriptor_array_map + 0x32fd, // uncompiled_data_without_preparse_data_map + 0x3325, // uncompiled_data_with_preparse_data_map + 0x334d, // uncompiled_data_without_preparse_data_with_job_map + 0x3375, // uncompiled_data_with_preparse_data_and_job_map + 0x339d, // on_heap_basic_block_profiler_data_map + 0x33c5, // turbofan_bitset_type_map + 0x33ed, // turbofan_union_type_map + 0x3415, // turbofan_range_type_map + 0x343d, // turbofan_heap_constant_type_map + 0x3465, // turbofan_other_number_constant_type_map + 0x348d, // turboshaft_word32type_map + 0x34b5, // turboshaft_word32range_type_map + 0x375d, // turboshaft_word32set_type_map + 0x34dd, // turboshaft_word64type_map + 0x3505, // turboshaft_word64range_type_map + 0x3785, // turboshaft_word64set_type_map + 0x352d, // turboshaft_float64type_map + 0x3555, // turboshaft_float64range_type_map + 0x37ad, // turboshaft_float64set_type_map + 0x357d, // internal_class_map + 0x35a5, // smi_pair_map + 0x35cd, // smi_box_map + 0x35f5, // exported_sub_class_base_map + 0x361d, // exported_sub_class_map + 0x3645, // abstract_internal_class_subclass1_map + 0x366d, // abstract_internal_class_subclass2_map + 0x37d5, // internal_class_with_smi_elements_map + 0x37fd, // internal_class_with_struct_elements_map + 0x3695, // exported_sub_class2_map + 0x36bd, // sort_state_map + 0x36e5, // 
wasm_string_view_iter_map + 0x2931, // allocation_site_map + 0x2959, // allocation_site_without_weaknext_map + 0x814d, // constructor_string + 0x8165, // next_string + 0x8175, // resolve_string + 0x8189, // then_string + 0x8199, // iterator_symbol + 0x81a9, // species_symbol + 0x81b9, // is_concat_spreadable_symbol + 0x2981, // load_handler1_map + 0x29a9, // load_handler2_map + 0x29d1, // load_handler3_map + 0x29f9, // store_handler0_map + 0x2a21, // store_handler1_map + 0x2a49, // store_handler2_map + 0x2a71, // store_handler3_map +}; } // namespace internal } // namespace v8 diff --git a/src/snapshot/static-roots-gen.cc b/src/snapshot/static-roots-gen.cc index 79036ea89c..ae0068d29b 100644 --- a/src/snapshot/static-roots-gen.cc +++ b/src/snapshot/static-roots-gen.cc @@ -30,6 +30,9 @@ void StaticRootsTableGen::write(Isolate* isolate, const char* file) { "that can be\n" << "// found in the LICENSE file.\n" << "\n" + << "// This file is automatically generated by " + "`tools/dev/gen-static-roots.py`. Do\n// not edit manually.\n" + << "\n" << "#ifndef V8_ROOTS_STATIC_ROOTS_H_\n" << "#define V8_ROOTS_STATIC_ROOTS_H_\n" << "\n" diff --git a/tools/v8heapconst.py b/tools/v8heapconst.py index ee486d1cc5..47ad87265c 100644 --- a/tools/v8heapconst.py +++ b/tools/v8heapconst.py @@ -295,250 +295,250 @@ INSTANCE_TYPES = { # List of known V8 maps. 
KNOWN_MAPS = { ("read_only_space", 0x02141): (255, "MetaMap"), - ("read_only_space", 0x02169): (131, "NullMap"), - ("read_only_space", 0x02191): (237, "StrongDescriptorArrayMap"), + ("read_only_space", 0x02169): (175, "FixedArrayMap"), + ("read_only_space", 0x02191): (240, "WeakFixedArrayMap"), ("read_only_space", 0x021b9): (273, "WeakArrayListMap"), - ("read_only_space", 0x021fd): (154, "EnumCacheMap"), - ("read_only_space", 0x02231): (175, "FixedArrayMap"), - ("read_only_space", 0x0227d): (8, "OneByteInternalizedStringMap"), - ("read_only_space", 0x022c9): (252, "FreeSpaceMap"), - ("read_only_space", 0x022f1): (251, "OnePointerFillerMap"), - ("read_only_space", 0x02319): (251, "TwoPointerFillerMap"), - ("read_only_space", 0x02341): (131, "UninitializedMap"), - ("read_only_space", 0x023b9): (131, "UndefinedMap"), - ("read_only_space", 0x023fd): (130, "HeapNumberMap"), - ("read_only_space", 0x02431): (131, "TheHoleMap"), - ("read_only_space", 0x02491): (131, "BooleanMap"), - ("read_only_space", 0x02535): (190, "ByteArrayMap"), - ("read_only_space", 0x0255d): (175, "FixedCOWArrayMap"), - ("read_only_space", 0x02585): (176, "HashTableMap"), - ("read_only_space", 0x025ad): (128, "SymbolMap"), - ("read_only_space", 0x025d5): (40, "OneByteStringMap"), - ("read_only_space", 0x025fd): (261, "ScopeInfoMap"), - ("read_only_space", 0x02625): (262, "SharedFunctionInfoMap"), - ("read_only_space", 0x0264d): (245, "CodeMap"), - ("read_only_space", 0x02675): (244, "CellMap"), - ("read_only_space", 0x0269d): (260, "GlobalPropertyCellMap"), - ("read_only_space", 0x026c5): (204, "ForeignMap"), - ("read_only_space", 0x026ed): (241, "TransitionArrayMap"), - ("read_only_space", 0x02715): (45, "ThinOneByteStringMap"), - ("read_only_space", 0x0273d): (250, "FeedbackVectorMap"), - ("read_only_space", 0x02775): (131, "ArgumentsMarkerMap"), - ("read_only_space", 0x027d5): (131, "ExceptionMap"), - ("read_only_space", 0x02831): (131, "TerminationExceptionMap"), - ("read_only_space", 0x02899): 
(131, "OptimizedOutMap"), - ("read_only_space", 0x028f9): (131, "StaleRegisterMap"), - ("read_only_space", 0x02959): (189, "ScriptContextTableMap"), - ("read_only_space", 0x02981): (187, "ClosureFeedbackCellArrayMap"), - ("read_only_space", 0x029a9): (249, "FeedbackMetadataArrayMap"), - ("read_only_space", 0x029d1): (175, "ArrayListMap"), - ("read_only_space", 0x029f9): (129, "BigIntMap"), - ("read_only_space", 0x02a21): (188, "ObjectBoilerplateDescriptionMap"), - ("read_only_space", 0x02a49): (191, "BytecodeArrayMap"), - ("read_only_space", 0x02a71): (246, "CodeDataContainerMap"), - ("read_only_space", 0x02a99): (247, "CoverageInfoMap"), - ("read_only_space", 0x02ac1): (192, "FixedDoubleArrayMap"), - ("read_only_space", 0x02ae9): (178, "GlobalDictionaryMap"), - ("read_only_space", 0x02b11): (156, "ManyClosuresCellMap"), - ("read_only_space", 0x02b39): (256, "MegaDomHandlerMap"), - ("read_only_space", 0x02b61): (175, "ModuleInfoMap"), - ("read_only_space", 0x02b89): (179, "NameDictionaryMap"), - ("read_only_space", 0x02bb1): (156, "NoClosuresCellMap"), - ("read_only_space", 0x02bd9): (181, "NumberDictionaryMap"), - ("read_only_space", 0x02c01): (156, "OneClosureCellMap"), - ("read_only_space", 0x02c29): (182, "OrderedHashMapMap"), - ("read_only_space", 0x02c51): (183, "OrderedHashSetMap"), - ("read_only_space", 0x02c79): (180, "NameToIndexHashTableMap"), - ("read_only_space", 0x02ca1): (185, "RegisteredSymbolTableMap"), - ("read_only_space", 0x02cc9): (184, "OrderedNameDictionaryMap"), - ("read_only_space", 0x02cf1): (258, "PreparseDataMap"), - ("read_only_space", 0x02d19): (259, "PropertyArrayMap"), - ("read_only_space", 0x02d41): (242, "AccessorInfoMap"), - ("read_only_space", 0x02d69): (243, "SideEffectCallHandlerInfoMap"), - ("read_only_space", 0x02d91): (243, "SideEffectFreeCallHandlerInfoMap"), - ("read_only_space", 0x02db9): (243, "NextCallSideEffectFreeCallHandlerInfoMap"), - ("read_only_space", 0x02de1): (186, "SimpleNumberDictionaryMap"), - 
("read_only_space", 0x02e09): (231, "SmallOrderedHashMapMap"), - ("read_only_space", 0x02e31): (232, "SmallOrderedHashSetMap"), - ("read_only_space", 0x02e59): (233, "SmallOrderedNameDictionaryMap"), - ("read_only_space", 0x02e81): (238, "SourceTextModuleMap"), - ("read_only_space", 0x02ea9): (266, "SwissNameDictionaryMap"), - ("read_only_space", 0x02ed1): (239, "SyntheticModuleMap"), - ("read_only_space", 0x02ef9): (267, "WasmApiFunctionRefMap"), - ("read_only_space", 0x02f21): (225, "WasmCapiFunctionDataMap"), - ("read_only_space", 0x02f49): (226, "WasmExportedFunctionDataMap"), - ("read_only_space", 0x02f71): (269, "WasmInternalFunctionMap"), - ("read_only_space", 0x02f99): (227, "WasmJSFunctionDataMap"), - ("read_only_space", 0x02fc1): (270, "WasmResumeDataMap"), - ("read_only_space", 0x02fe9): (272, "WasmTypeInfoMap"), - ("read_only_space", 0x03011): (268, "WasmContinuationObjectMap"), - ("read_only_space", 0x03039): (240, "WeakFixedArrayMap"), - ("read_only_space", 0x03061): (177, "EphemeronHashTableMap"), - ("read_only_space", 0x03089): (248, "EmbedderDataArrayMap"), - ("read_only_space", 0x030b1): (274, "WeakCellMap"), - ("read_only_space", 0x030d9): (32, "StringMap"), - ("read_only_space", 0x03101): (41, "ConsOneByteStringMap"), - ("read_only_space", 0x03129): (33, "ConsStringMap"), - ("read_only_space", 0x03151): (37, "ThinStringMap"), - ("read_only_space", 0x03179): (35, "SlicedStringMap"), - ("read_only_space", 0x031a1): (43, "SlicedOneByteStringMap"), - ("read_only_space", 0x031c9): (34, "ExternalStringMap"), - ("read_only_space", 0x031f1): (42, "ExternalOneByteStringMap"), - ("read_only_space", 0x03219): (50, "UncachedExternalStringMap"), - ("read_only_space", 0x03241): (0, "InternalizedStringMap"), - ("read_only_space", 0x03269): (2, "ExternalInternalizedStringMap"), - ("read_only_space", 0x03291): (10, "ExternalOneByteInternalizedStringMap"), - ("read_only_space", 0x032b9): (18, "UncachedExternalInternalizedStringMap"), - ("read_only_space", 
0x032e1): (26, "UncachedExternalOneByteInternalizedStringMap"), - ("read_only_space", 0x03309): (58, "UncachedExternalOneByteStringMap"), - ("read_only_space", 0x03331): (104, "SharedOneByteStringMap"), - ("read_only_space", 0x03359): (96, "SharedStringMap"), - ("read_only_space", 0x03381): (106, "SharedExternalOneByteStringMap"), - ("read_only_space", 0x033a9): (98, "SharedExternalStringMap"), - ("read_only_space", 0x033d1): (122, "SharedUncachedExternalOneByteStringMap"), - ("read_only_space", 0x033f9): (114, "SharedUncachedExternalStringMap"), - ("read_only_space", 0x03421): (109, "SharedThinOneByteStringMap"), - ("read_only_space", 0x03449): (101, "SharedThinStringMap"), - ("read_only_space", 0x03471): (131, "SelfReferenceMarkerMap"), - ("read_only_space", 0x03499): (131, "BasicBlockCountersMarkerMap"), - ("read_only_space", 0x034dd): (146, "ArrayBoilerplateDescriptionMap"), - ("read_only_space", 0x035dd): (158, "InterceptorInfoMap"), - ("read_only_space", 0x07655): (132, "PromiseFulfillReactionJobTaskMap"), - ("read_only_space", 0x0767d): (133, "PromiseRejectReactionJobTaskMap"), - ("read_only_space", 0x076a5): (134, "CallableTaskMap"), - ("read_only_space", 0x076cd): (135, "CallbackTaskMap"), - ("read_only_space", 0x076f5): (136, "PromiseResolveThenableJobTaskMap"), - ("read_only_space", 0x0771d): (139, "FunctionTemplateInfoMap"), - ("read_only_space", 0x07745): (140, "ObjectTemplateInfoMap"), - ("read_only_space", 0x0776d): (141, "AccessCheckInfoMap"), - ("read_only_space", 0x07795): (142, "AccessorPairMap"), - ("read_only_space", 0x077bd): (143, "AliasedArgumentsEntryMap"), - ("read_only_space", 0x077e5): (144, "AllocationMementoMap"), - ("read_only_space", 0x0780d): (147, "AsmWasmDataMap"), - ("read_only_space", 0x07835): (148, "AsyncGeneratorRequestMap"), - ("read_only_space", 0x0785d): (149, "BreakPointMap"), - ("read_only_space", 0x07885): (150, "BreakPointInfoMap"), - ("read_only_space", 0x078ad): (151, "CallSiteInfoMap"), - ("read_only_space", 
0x078d5): (152, "ClassPositionsMap"), - ("read_only_space", 0x078fd): (153, "DebugInfoMap"), - ("read_only_space", 0x07925): (155, "ErrorStackDataMap"), - ("read_only_space", 0x0794d): (157, "FunctionTemplateRareDataMap"), - ("read_only_space", 0x07975): (159, "InterpreterDataMap"), - ("read_only_space", 0x0799d): (160, "ModuleRequestMap"), - ("read_only_space", 0x079c5): (161, "PromiseCapabilityMap"), - ("read_only_space", 0x079ed): (162, "PromiseOnStackMap"), - ("read_only_space", 0x07a15): (163, "PromiseReactionMap"), - ("read_only_space", 0x07a3d): (164, "PropertyDescriptorObjectMap"), - ("read_only_space", 0x07a65): (165, "PrototypeInfoMap"), - ("read_only_space", 0x07a8d): (166, "RegExpBoilerplateDescriptionMap"), - ("read_only_space", 0x07ab5): (167, "ScriptMap"), - ("read_only_space", 0x07add): (168, "ScriptOrModuleMap"), - ("read_only_space", 0x07b05): (169, "SourceTextModuleInfoEntryMap"), - ("read_only_space", 0x07b2d): (170, "StackFrameInfoMap"), - ("read_only_space", 0x07b55): (171, "TemplateObjectDescriptionMap"), - ("read_only_space", 0x07b7d): (172, "Tuple2Map"), - ("read_only_space", 0x07ba5): (173, "WasmExceptionTagMap"), - ("read_only_space", 0x07bcd): (174, "WasmIndirectFunctionTableMap"), - ("read_only_space", 0x07bf5): (194, "SloppyArgumentsElementsMap"), - ("read_only_space", 0x07c1d): (236, "DescriptorArrayMap"), - ("read_only_space", 0x07c45): (222, "UncompiledDataWithoutPreparseDataMap"), - ("read_only_space", 0x07c6d): (220, "UncompiledDataWithPreparseDataMap"), - ("read_only_space", 0x07c95): (223, "UncompiledDataWithoutPreparseDataWithJobMap"), - ("read_only_space", 0x07cbd): (221, "UncompiledDataWithPreparseDataAndJobMap"), - ("read_only_space", 0x07ce5): (257, "OnHeapBasicBlockProfilerDataMap"), - ("read_only_space", 0x07d0d): (215, "TurbofanBitsetTypeMap"), - ("read_only_space", 0x07d35): (219, "TurbofanUnionTypeMap"), - ("read_only_space", 0x07d5d): (218, "TurbofanRangeTypeMap"), - ("read_only_space", 0x07d85): (216, 
"TurbofanHeapConstantTypeMap"), - ("read_only_space", 0x07dad): (217, "TurbofanOtherNumberConstantTypeMap"), - ("read_only_space", 0x07dd5): (198, "TurboshaftWord32TypeMap"), - ("read_only_space", 0x07dfd): (199, "TurboshaftWord32RangeTypeMap"), - ("read_only_space", 0x07e25): (200, "TurboshaftWord32SetTypeMap"), - ("read_only_space", 0x07e4d): (201, "TurboshaftWord64TypeMap"), - ("read_only_space", 0x07e75): (202, "TurboshaftWord64RangeTypeMap"), - ("read_only_space", 0x07e9d): (203, "TurboshaftWord64SetTypeMap"), - ("read_only_space", 0x07ec5): (195, "TurboshaftFloat64TypeMap"), - ("read_only_space", 0x07eed): (196, "TurboshaftFloat64RangeTypeMap"), - ("read_only_space", 0x07f15): (197, "TurboshaftFloat64SetTypeMap"), - ("read_only_space", 0x07f3d): (253, "InternalClassMap"), - ("read_only_space", 0x07f65): (264, "SmiPairMap"), - ("read_only_space", 0x07f8d): (263, "SmiBoxMap"), - ("read_only_space", 0x07fb5): (228, "ExportedSubClassBaseMap"), - ("read_only_space", 0x07fdd): (229, "ExportedSubClassMap"), - ("read_only_space", 0x08005): (234, "AbstractInternalClassSubclass1Map"), - ("read_only_space", 0x0802d): (235, "AbstractInternalClassSubclass2Map"), - ("read_only_space", 0x08055): (193, "InternalClassWithSmiElementsMap"), - ("read_only_space", 0x0807d): (254, "InternalClassWithStructElementsMap"), - ("read_only_space", 0x080a5): (230, "ExportedSubClass2Map"), - ("read_only_space", 0x080cd): (265, "SortStateMap"), - ("read_only_space", 0x080f5): (271, "WasmStringViewIterMap"), - ("read_only_space", 0x0811d): (145, "AllocationSiteWithWeakNextMap"), - ("read_only_space", 0x08145): (145, "AllocationSiteWithoutWeakNextMap"), - ("read_only_space", 0x08211): (137, "LoadHandler1Map"), - ("read_only_space", 0x08239): (137, "LoadHandler2Map"), - ("read_only_space", 0x08261): (137, "LoadHandler3Map"), - ("read_only_space", 0x08289): (138, "StoreHandler0Map"), - ("read_only_space", 0x082b1): (138, "StoreHandler1Map"), - ("read_only_space", 0x082d9): (138, 
"StoreHandler2Map"), - ("read_only_space", 0x08301): (138, "StoreHandler3Map"), + ("read_only_space", 0x021e1): (175, "FixedCOWArrayMap"), + ("read_only_space", 0x02209): (236, "DescriptorArrayMap"), + ("read_only_space", 0x02231): (131, "UndefinedMap"), + ("read_only_space", 0x02259): (131, "NullMap"), + ("read_only_space", 0x02281): (131, "TheHoleMap"), + ("read_only_space", 0x02319): (132, "PromiseFulfillReactionJobTaskMap"), + ("read_only_space", 0x02341): (133, "PromiseRejectReactionJobTaskMap"), + ("read_only_space", 0x02369): (134, "CallableTaskMap"), + ("read_only_space", 0x02391): (135, "CallbackTaskMap"), + ("read_only_space", 0x023b9): (136, "PromiseResolveThenableJobTaskMap"), + ("read_only_space", 0x023e1): (139, "FunctionTemplateInfoMap"), + ("read_only_space", 0x02409): (140, "ObjectTemplateInfoMap"), + ("read_only_space", 0x02431): (141, "AccessCheckInfoMap"), + ("read_only_space", 0x02459): (142, "AccessorPairMap"), + ("read_only_space", 0x02481): (143, "AliasedArgumentsEntryMap"), + ("read_only_space", 0x024a9): (144, "AllocationMementoMap"), + ("read_only_space", 0x024d1): (146, "ArrayBoilerplateDescriptionMap"), + ("read_only_space", 0x024f9): (147, "AsmWasmDataMap"), + ("read_only_space", 0x02521): (148, "AsyncGeneratorRequestMap"), + ("read_only_space", 0x02549): (149, "BreakPointMap"), + ("read_only_space", 0x02571): (150, "BreakPointInfoMap"), + ("read_only_space", 0x02599): (151, "CallSiteInfoMap"), + ("read_only_space", 0x025c1): (152, "ClassPositionsMap"), + ("read_only_space", 0x025e9): (153, "DebugInfoMap"), + ("read_only_space", 0x02611): (154, "EnumCacheMap"), + ("read_only_space", 0x02639): (155, "ErrorStackDataMap"), + ("read_only_space", 0x02661): (157, "FunctionTemplateRareDataMap"), + ("read_only_space", 0x02689): (158, "InterceptorInfoMap"), + ("read_only_space", 0x026b1): (159, "InterpreterDataMap"), + ("read_only_space", 0x026d9): (160, "ModuleRequestMap"), + ("read_only_space", 0x02701): (161, "PromiseCapabilityMap"), + 
("read_only_space", 0x02729): (162, "PromiseOnStackMap"), + ("read_only_space", 0x02751): (163, "PromiseReactionMap"), + ("read_only_space", 0x02779): (164, "PropertyDescriptorObjectMap"), + ("read_only_space", 0x027a1): (165, "PrototypeInfoMap"), + ("read_only_space", 0x027c9): (166, "RegExpBoilerplateDescriptionMap"), + ("read_only_space", 0x027f1): (167, "ScriptMap"), + ("read_only_space", 0x02819): (168, "ScriptOrModuleMap"), + ("read_only_space", 0x02841): (169, "SourceTextModuleInfoEntryMap"), + ("read_only_space", 0x02869): (170, "StackFrameInfoMap"), + ("read_only_space", 0x02891): (171, "TemplateObjectDescriptionMap"), + ("read_only_space", 0x028b9): (172, "Tuple2Map"), + ("read_only_space", 0x028e1): (173, "WasmExceptionTagMap"), + ("read_only_space", 0x02909): (174, "WasmIndirectFunctionTableMap"), + ("read_only_space", 0x02931): (145, "AllocationSiteWithWeakNextMap"), + ("read_only_space", 0x02959): (145, "AllocationSiteWithoutWeakNextMap"), + ("read_only_space", 0x02981): (137, "LoadHandler1Map"), + ("read_only_space", 0x029a9): (137, "LoadHandler2Map"), + ("read_only_space", 0x029d1): (137, "LoadHandler3Map"), + ("read_only_space", 0x029f9): (138, "StoreHandler0Map"), + ("read_only_space", 0x02a21): (138, "StoreHandler1Map"), + ("read_only_space", 0x02a49): (138, "StoreHandler2Map"), + ("read_only_space", 0x02a71): (138, "StoreHandler3Map"), + ("read_only_space", 0x02ab5): (261, "ScopeInfoMap"), + ("read_only_space", 0x02add): (175, "ModuleInfoMap"), + ("read_only_space", 0x02b05): (187, "ClosureFeedbackCellArrayMap"), + ("read_only_space", 0x02b2d): (250, "FeedbackVectorMap"), + ("read_only_space", 0x02b55): (130, "HeapNumberMap"), + ("read_only_space", 0x02b7d): (128, "SymbolMap"), + ("read_only_space", 0x02ba5): (204, "ForeignMap"), + ("read_only_space", 0x02bcd): (256, "MegaDomHandlerMap"), + ("read_only_space", 0x02bf5): (131, "BooleanMap"), + ("read_only_space", 0x02c1d): (131, "UninitializedMap"), + ("read_only_space", 0x02c45): (131, 
"ArgumentsMarkerMap"), + ("read_only_space", 0x02c6d): (131, "ExceptionMap"), + ("read_only_space", 0x02c95): (131, "TerminationExceptionMap"), + ("read_only_space", 0x02cbd): (131, "OptimizedOutMap"), + ("read_only_space", 0x02ce5): (131, "StaleRegisterMap"), + ("read_only_space", 0x02d0d): (131, "SelfReferenceMarkerMap"), + ("read_only_space", 0x02d35): (131, "BasicBlockCountersMarkerMap"), + ("read_only_space", 0x02d5d): (129, "BigIntMap"), + ("read_only_space", 0x02d85): (32, "StringMap"), + ("read_only_space", 0x02dad): (40, "OneByteStringMap"), + ("read_only_space", 0x02dd5): (33, "ConsStringMap"), + ("read_only_space", 0x02dfd): (41, "ConsOneByteStringMap"), + ("read_only_space", 0x02e25): (35, "SlicedStringMap"), + ("read_only_space", 0x02e4d): (43, "SlicedOneByteStringMap"), + ("read_only_space", 0x02e75): (34, "ExternalStringMap"), + ("read_only_space", 0x02e9d): (42, "ExternalOneByteStringMap"), + ("read_only_space", 0x02ec5): (50, "UncachedExternalStringMap"), + ("read_only_space", 0x02eed): (58, "UncachedExternalOneByteStringMap"), + ("read_only_space", 0x02f15): (0, "InternalizedStringMap"), + ("read_only_space", 0x02f3d): (8, "OneByteInternalizedStringMap"), + ("read_only_space", 0x02f65): (2, "ExternalInternalizedStringMap"), + ("read_only_space", 0x02f8d): (10, "ExternalOneByteInternalizedStringMap"), + ("read_only_space", 0x02fb5): (18, "UncachedExternalInternalizedStringMap"), + ("read_only_space", 0x02fdd): (26, "UncachedExternalOneByteInternalizedStringMap"), + ("read_only_space", 0x03005): (37, "ThinStringMap"), + ("read_only_space", 0x0302d): (45, "ThinOneByteStringMap"), + ("read_only_space", 0x03055): (96, "SharedStringMap"), + ("read_only_space", 0x0307d): (104, "SharedOneByteStringMap"), + ("read_only_space", 0x030a5): (98, "SharedExternalStringMap"), + ("read_only_space", 0x030cd): (106, "SharedExternalOneByteStringMap"), + ("read_only_space", 0x030f5): (114, "SharedUncachedExternalStringMap"), + ("read_only_space", 0x0311d): (122, 
"SharedUncachedExternalOneByteStringMap"), + ("read_only_space", 0x03145): (101, "SharedThinStringMap"), + ("read_only_space", 0x0316d): (109, "SharedThinOneByteStringMap"), + ("read_only_space", 0x03195): (192, "FixedDoubleArrayMap"), + ("read_only_space", 0x031bd): (249, "FeedbackMetadataArrayMap"), + ("read_only_space", 0x031e5): (190, "ByteArrayMap"), + ("read_only_space", 0x0320d): (191, "BytecodeArrayMap"), + ("read_only_space", 0x03235): (252, "FreeSpaceMap"), + ("read_only_space", 0x0325d): (259, "PropertyArrayMap"), + ("read_only_space", 0x03285): (231, "SmallOrderedHashMapMap"), + ("read_only_space", 0x032ad): (232, "SmallOrderedHashSetMap"), + ("read_only_space", 0x032d5): (233, "SmallOrderedNameDictionaryMap"), + ("read_only_space", 0x032fd): (222, "UncompiledDataWithoutPreparseDataMap"), + ("read_only_space", 0x03325): (220, "UncompiledDataWithPreparseDataMap"), + ("read_only_space", 0x0334d): (223, "UncompiledDataWithoutPreparseDataWithJobMap"), + ("read_only_space", 0x03375): (221, "UncompiledDataWithPreparseDataAndJobMap"), + ("read_only_space", 0x0339d): (257, "OnHeapBasicBlockProfilerDataMap"), + ("read_only_space", 0x033c5): (215, "TurbofanBitsetTypeMap"), + ("read_only_space", 0x033ed): (219, "TurbofanUnionTypeMap"), + ("read_only_space", 0x03415): (218, "TurbofanRangeTypeMap"), + ("read_only_space", 0x0343d): (216, "TurbofanHeapConstantTypeMap"), + ("read_only_space", 0x03465): (217, "TurbofanOtherNumberConstantTypeMap"), + ("read_only_space", 0x0348d): (198, "TurboshaftWord32TypeMap"), + ("read_only_space", 0x034b5): (199, "TurboshaftWord32RangeTypeMap"), + ("read_only_space", 0x034dd): (201, "TurboshaftWord64TypeMap"), + ("read_only_space", 0x03505): (202, "TurboshaftWord64RangeTypeMap"), + ("read_only_space", 0x0352d): (195, "TurboshaftFloat64TypeMap"), + ("read_only_space", 0x03555): (196, "TurboshaftFloat64RangeTypeMap"), + ("read_only_space", 0x0357d): (253, "InternalClassMap"), + ("read_only_space", 0x035a5): (264, "SmiPairMap"), + 
("read_only_space", 0x035cd): (263, "SmiBoxMap"), + ("read_only_space", 0x035f5): (228, "ExportedSubClassBaseMap"), + ("read_only_space", 0x0361d): (229, "ExportedSubClassMap"), + ("read_only_space", 0x03645): (234, "AbstractInternalClassSubclass1Map"), + ("read_only_space", 0x0366d): (235, "AbstractInternalClassSubclass2Map"), + ("read_only_space", 0x03695): (230, "ExportedSubClass2Map"), + ("read_only_space", 0x036bd): (265, "SortStateMap"), + ("read_only_space", 0x036e5): (271, "WasmStringViewIterMap"), + ("read_only_space", 0x0370d): (194, "SloppyArgumentsElementsMap"), + ("read_only_space", 0x03735): (237, "StrongDescriptorArrayMap"), + ("read_only_space", 0x0375d): (200, "TurboshaftWord32SetTypeMap"), + ("read_only_space", 0x03785): (203, "TurboshaftWord64SetTypeMap"), + ("read_only_space", 0x037ad): (197, "TurboshaftFloat64SetTypeMap"), + ("read_only_space", 0x037d5): (193, "InternalClassWithSmiElementsMap"), + ("read_only_space", 0x037fd): (254, "InternalClassWithStructElementsMap"), + ("read_only_space", 0x03825): (245, "CodeMap"), + ("read_only_space", 0x0384d): (244, "CellMap"), + ("read_only_space", 0x0387d): (260, "GlobalPropertyCellMap"), + ("read_only_space", 0x038a5): (251, "OnePointerFillerMap"), + ("read_only_space", 0x038cd): (251, "TwoPointerFillerMap"), + ("read_only_space", 0x038f5): (156, "NoClosuresCellMap"), + ("read_only_space", 0x0391d): (156, "OneClosureCellMap"), + ("read_only_space", 0x03945): (156, "ManyClosuresCellMap"), + ("read_only_space", 0x0396d): (241, "TransitionArrayMap"), + ("read_only_space", 0x03995): (176, "HashTableMap"), + ("read_only_space", 0x039bd): (182, "OrderedHashMapMap"), + ("read_only_space", 0x039e5): (183, "OrderedHashSetMap"), + ("read_only_space", 0x03a0d): (184, "OrderedNameDictionaryMap"), + ("read_only_space", 0x03a35): (179, "NameDictionaryMap"), + ("read_only_space", 0x03a5d): (266, "SwissNameDictionaryMap"), + ("read_only_space", 0x03a85): (178, "GlobalDictionaryMap"), + ("read_only_space", 0x03aad): 
(181, "NumberDictionaryMap"), + ("read_only_space", 0x03ad5): (186, "SimpleNumberDictionaryMap"), + ("read_only_space", 0x03afd): (180, "NameToIndexHashTableMap"), + ("read_only_space", 0x03b25): (185, "RegisteredSymbolTableMap"), + ("read_only_space", 0x03b4d): (248, "EmbedderDataArrayMap"), + ("read_only_space", 0x03b75): (177, "EphemeronHashTableMap"), + ("read_only_space", 0x03b9d): (175, "ArrayListMap"), + ("read_only_space", 0x03bc5): (189, "ScriptContextTableMap"), + ("read_only_space", 0x03bed): (188, "ObjectBoilerplateDescriptionMap"), + ("read_only_space", 0x03c15): (247, "CoverageInfoMap"), + ("read_only_space", 0x03c3d): (242, "AccessorInfoMap"), + ("read_only_space", 0x03c65): (243, "SideEffectCallHandlerInfoMap"), + ("read_only_space", 0x03c8d): (243, "SideEffectFreeCallHandlerInfoMap"), + ("read_only_space", 0x03cb5): (243, "NextCallSideEffectFreeCallHandlerInfoMap"), + ("read_only_space", 0x03cdd): (258, "PreparseDataMap"), + ("read_only_space", 0x03d05): (262, "SharedFunctionInfoMap"), + ("read_only_space", 0x03d2d): (238, "SourceTextModuleMap"), + ("read_only_space", 0x03d55): (239, "SyntheticModuleMap"), + ("read_only_space", 0x03d7d): (246, "CodeDataContainerMap"), + ("read_only_space", 0x03da5): (267, "WasmApiFunctionRefMap"), + ("read_only_space", 0x03dcd): (225, "WasmCapiFunctionDataMap"), + ("read_only_space", 0x03df5): (226, "WasmExportedFunctionDataMap"), + ("read_only_space", 0x03e1d): (269, "WasmInternalFunctionMap"), + ("read_only_space", 0x03e45): (227, "WasmJSFunctionDataMap"), + ("read_only_space", 0x03e6d): (270, "WasmResumeDataMap"), + ("read_only_space", 0x03e95): (272, "WasmTypeInfoMap"), + ("read_only_space", 0x03ebd): (268, "WasmContinuationObjectMap"), + ("read_only_space", 0x03ee5): (274, "WeakCellMap"), ("old_space", 0x0438d): (2116, "ExternalMap"), ("old_space", 0x043b5): (2120, "JSMessageObjectMap"), } # List of known V8 objects. 
KNOWN_OBJECTS = { - ("read_only_space", 0x021e1): "EmptyWeakArrayList", - ("read_only_space", 0x021ed): "EmptyDescriptorArray", - ("read_only_space", 0x02225): "EmptyEnumCache", - ("read_only_space", 0x02259): "EmptyFixedArray", - ("read_only_space", 0x02261): "NullValue", - ("read_only_space", 0x02369): "UninitializedValue", - ("read_only_space", 0x023e1): "UndefinedValue", - ("read_only_space", 0x02425): "NanValue", - ("read_only_space", 0x02459): "TheHoleValue", - ("read_only_space", 0x02485): "HoleNanValue", - ("read_only_space", 0x024b9): "TrueValue", - ("read_only_space", 0x024f9): "FalseValue", - ("read_only_space", 0x02529): "empty_string", - ("read_only_space", 0x02765): "EmptyScopeInfo", - ("read_only_space", 0x0279d): "ArgumentsMarker", - ("read_only_space", 0x027fd): "Exception", - ("read_only_space", 0x02859): "TerminationException", - ("read_only_space", 0x028c1): "OptimizedOut", - ("read_only_space", 0x02921): "StaleRegister", - ("read_only_space", 0x034c1): "EmptyPropertyArray", - ("read_only_space", 0x034c9): "EmptyByteArray", - ("read_only_space", 0x034d1): "EmptyObjectBoilerplateDescription", - ("read_only_space", 0x03505): "EmptyArrayBoilerplateDescription", - ("read_only_space", 0x03511): "EmptyClosureFeedbackCellArray", - ("read_only_space", 0x03519): "EmptySlowElementDictionary", - ("read_only_space", 0x0353d): "EmptyOrderedHashMap", - ("read_only_space", 0x03551): "EmptyOrderedHashSet", - ("read_only_space", 0x03565): "EmptyFeedbackMetadata", - ("read_only_space", 0x03571): "EmptyPropertyDictionary", - ("read_only_space", 0x03599): "EmptyOrderedPropertyDictionary", - ("read_only_space", 0x035b1): "EmptySwissPropertyDictionary", - ("read_only_space", 0x03605): "NoOpInterceptorInfo", - ("read_only_space", 0x0362d): "EmptyArrayList", - ("read_only_space", 0x03639): "EmptyWeakFixedArray", - ("read_only_space", 0x03641): "InvalidPrototypeValidityCell", - ("read_only_space", 0x03649): "InfinityValue", - ("read_only_space", 0x03655): 
"MinusZeroValue", - ("read_only_space", 0x03661): "MinusInfinityValue", - ("read_only_space", 0x0366d): "MaxSafeInteger", - ("read_only_space", 0x03679): "MaxUInt32", - ("read_only_space", 0x03685): "SmiMinValue", - ("read_only_space", 0x03691): "SmiMaxValuePlusOne", - ("read_only_space", 0x0369d): "SingleCharacterStringTable", - ("read_only_space", 0x04aa5): "SelfReferenceMarker", - ("read_only_space", 0x04ae5): "BasicBlockCountersMarker", - ("read_only_space", 0x04b29): "OffHeapTrampolineRelocationInfo", - ("read_only_space", 0x04b35): "GlobalThisBindingScopeInfo", - ("read_only_space", 0x04b65): "EmptyFunctionScopeInfo", - ("read_only_space", 0x04b89): "NativeScopeInfo", - ("read_only_space", 0x04ba1): "ShadowRealmScopeInfo", - ("read_only_space", 0x04bb9): "EmptySymbolTable", - ("read_only_space", 0x04bd5): "HashSeed", + ("read_only_space", 0x022a9): "EmptyFixedArray", + ("read_only_space", 0x022b1): "EmptyWeakFixedArray", + ("read_only_space", 0x022b9): "EmptyWeakArrayList", + ("read_only_space", 0x022c5): "NullValue", + ("read_only_space", 0x022e1): "UndefinedValue", + ("read_only_space", 0x022fd): "TheHoleValue", + ("read_only_space", 0x02a99): "EmptyEnumCache", + ("read_only_space", 0x02aa5): "EmptyDescriptorArray", + ("read_only_space", 0x03875): "InvalidPrototypeValidityCell", + ("read_only_space", 0x03f0d): "EmptyArrayList", + ("read_only_space", 0x03f19): "EmptyScopeInfo", + ("read_only_space", 0x03f29): "EmptyObjectBoilerplateDescription", + ("read_only_space", 0x03f35): "EmptyArrayBoilerplateDescription", + ("read_only_space", 0x03f41): "TrueValue", + ("read_only_space", 0x03f5d): "FalseValue", + ("read_only_space", 0x03f79): "EmptyByteArray", + ("read_only_space", 0x03f81): "EmptyPropertyArray", + ("read_only_space", 0x03f89): "EmptyClosureFeedbackCellArray", + ("read_only_space", 0x03f91): "NoOpInterceptorInfo", + ("read_only_space", 0x03fb9): "MinusZeroValue", + ("read_only_space", 0x03fc5): "NanValue", + ("read_only_space", 0x03fd1): 
"HoleNanValue", + ("read_only_space", 0x03fdd): "InfinityValue", + ("read_only_space", 0x03fe9): "MinusInfinityValue", + ("read_only_space", 0x03ff5): "MaxSafeInteger", + ("read_only_space", 0x04001): "MaxUInt32", + ("read_only_space", 0x0400d): "SmiMinValue", + ("read_only_space", 0x04019): "SmiMaxValuePlusOne", + ("read_only_space", 0x04025): "HashSeed", + ("read_only_space", 0x04035): "SingleCharacterStringTable", + ("read_only_space", 0x0543d): "empty_string", + ("read_only_space", 0x07b19): "UninitializedValue", + ("read_only_space", 0x07b51): "ArgumentsMarker", + ("read_only_space", 0x07b89): "TerminationException", + ("read_only_space", 0x07bc9): "Exception", + ("read_only_space", 0x07be5): "OptimizedOut", + ("read_only_space", 0x07c1d): "StaleRegister", + ("read_only_space", 0x07c55): "SelfReferenceMarker", + ("read_only_space", 0x07c95): "BasicBlockCountersMarker", + ("read_only_space", 0x081c9): "EmptyPropertyDictionary", + ("read_only_space", 0x081f1): "EmptySymbolTable", + ("read_only_space", 0x0820d): "EmptySlowElementDictionary", + ("read_only_space", 0x08231): "EmptyOrderedHashMap", + ("read_only_space", 0x08245): "EmptyOrderedHashSet", + ("read_only_space", 0x08259): "EmptyOrderedPropertyDictionary", + ("read_only_space", 0x0827d): "EmptySwissPropertyDictionary", + ("read_only_space", 0x0829d): "EmptyFeedbackMetadata", + ("read_only_space", 0x082a9): "GlobalThisBindingScopeInfo", + ("read_only_space", 0x082c9): "EmptyFunctionScopeInfo", + ("read_only_space", 0x082ed): "NativeScopeInfo", + ("read_only_space", 0x08305): "ShadowRealmScopeInfo", + ("read_only_space", 0x0831d): "OffHeapTrampolineRelocationInfo", ("old_space", 0x0423d): "ArgumentsIteratorAccessor", ("old_space", 0x04255): "ArrayLengthAccessor", ("old_space", 0x0426d): "BoundFunctionLengthAccessor", From 0aa0b44a40f6c935bf40411dc65b04c80d9b6dd6 Mon Sep 17 00:00:00 2001 From: Michael Achenbach Date: Thu, 29 Dec 2022 14:48:58 +0100 Subject: [PATCH 087/654] Whitespace to test bots Change-Id: 
Ia9152f8821baf8f2ebeaf71125f05100cacf898d Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4129496 Bot-Commit: Rubber Stamper Cr-Commit-Position: refs/heads/main@{#85038} --- tools/whitespace.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/whitespace.txt b/tools/whitespace.txt index 35772c7da4..92fef9296e 100644 --- a/tools/whitespace.txt +++ b/tools/whitespace.txt @@ -6,7 +6,7 @@ A Smi balks into a war and says: "I'm so deoptimized today!" The doubles heard this and started to unbox. The Smi looked at them when a crazy v8-autoroll account showed up... -The autoroller bought a round of Himbeerbrause. Suddenly........ +The autoroller bought a round of Himbeerbrause. Suddenly......... The bartender starts to shake the bottles............................. I can't add trailing whitespaces, so I'm adding this line............ I'm starting to think that just adding trailing whitespaces might not be bad. From ba1fed5ccb2882d015becd70701caebb7137dfaa Mon Sep 17 00:00:00 2001 From: Jakob Linke Date: Thu, 29 Dec 2022 12:14:00 +0100 Subject: [PATCH 088/654] [maglev] Fix several bugs in codegen for `x % x` .. which should result in 0 if x is non-negative, and -0.0 otherwise. - Fix two invalid modulus-related folds. - Handle aliased inputs in Int32ModulusWithOverflow. - Drive-by: rename left/right to lhs/rhs to match the algorithm description. Note there is no deopt loop here since a result of -0.0 will update feedback to kSignedSmallInputs. 
Bug: v8:7700 Change-Id: I84fca0e43ded152d3520cbe73cc43299ff1c4230 Fixed: chromium:1403575 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128081 Auto-Submit: Jakob Linke Reviewed-by: Darius Mercadier Commit-Queue: Darius Mercadier Cr-Commit-Position: refs/heads/main@{#85039} --- src/maglev/arm64/maglev-ir-arm64.cc | 70 +++++++++++------- src/maglev/maglev-graph-builder.cc | 17 +++-- src/maglev/x64/maglev-ir-x64.cc | 74 ++++++++++++------- .../mjsunit/maglev/regress/regress-1403575.js | 14 ++++ 4 files changed, 118 insertions(+), 57 deletions(-) create mode 100644 test/mjsunit/maglev/regress/regress-1403575.js diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index 9ca21ecbdb..0e7f3c0a01 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -906,6 +906,10 @@ void Int32ModulusWithOverflow::SetValueLocationConstraints() { } void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { + // If AreAliased(lhs, rhs): + // deopt if lhs < 0 // Minus zero. + // 0 + // // Using same algorithm as in EffectControlLinearizer: // if rhs <= 0 then // rhs = -rhs @@ -922,62 +926,78 @@ void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm, // else // lhs % rhs - Register left = ToRegister(left_input()).W(); - Register right = ToRegister(right_input()).W(); + Register lhs = ToRegister(left_input()).W(); + Register rhs = ToRegister(right_input()).W(); Register out = ToRegister(result()).W(); + static constexpr DeoptimizeReason deopt_reason = + DeoptimizeReason::kDivisionByZero; + + if (lhs == rhs) { + // For the modulus algorithm described above, lhs and rhs must not alias + // each other. + __ Tst(lhs, lhs); + // TODO(victorgomes): This ideally should be kMinusZero, but Maglev only + // allows one deopt reason per IR. 
+ __ EmitEagerDeoptIf(mi, deopt_reason, this); + __ Move(ToRegister(result()), 0); + return; + } + + DCHECK(!AreAliased(lhs, rhs)); + ZoneLabelRef done(masm); ZoneLabelRef rhs_checked(masm); - __ Cmp(right, Immediate(0)); + __ Cmp(rhs, Immediate(0)); __ JumpToDeferredIf( le, - [](MaglevAssembler* masm, ZoneLabelRef rhs_checked, Register right, + [](MaglevAssembler* masm, ZoneLabelRef rhs_checked, Register rhs, Int32ModulusWithOverflow* node) { - __ Negs(right, right); - __ EmitEagerDeoptIf(eq, DeoptimizeReason::kDivisionByZero, node); + __ Negs(rhs, rhs); + __ EmitEagerDeoptIf(eq, deopt_reason, node); __ Jump(*rhs_checked); }, - rhs_checked, right, this); + rhs_checked, rhs, this); __ bind(*rhs_checked); - __ Cmp(left, Immediate(0)); + __ Cmp(lhs, Immediate(0)); __ JumpToDeferredIf( lt, - [](MaglevAssembler* masm, ZoneLabelRef done, Register left, - Register right, Register out, Int32ModulusWithOverflow* node) { + [](MaglevAssembler* masm, ZoneLabelRef done, Register lhs, Register rhs, + Register out, Int32ModulusWithOverflow* node) { UseScratchRegisterScope temps(masm); Register res = temps.AcquireW(); - __ neg(left, left); - __ udiv(res, left, right); - __ msub(out, res, right, left); + __ neg(lhs, lhs); + __ udiv(res, lhs, rhs); + __ msub(out, res, rhs, lhs); __ cmp(out, Immediate(0)); // TODO(victorgomes): This ideally should be kMinusZero, but Maglev // only allows one deopt reason per IR. - __ EmitEagerDeoptIf(eq, DeoptimizeReason::kDivisionByZero, node); + __ EmitEagerDeoptIf(eq, deopt_reason, node); __ neg(out, out); __ b(*done); }, - done, left, right, out, this); + done, lhs, rhs, out, this); - Label right_not_power_of_2; + Label rhs_not_power_of_2; UseScratchRegisterScope temps(masm); Register mask = temps.AcquireW(); - __ Add(mask, right, Immediate(-1)); - __ Tst(mask, right); - __ JumpIf(ne, &right_not_power_of_2); + __ Add(mask, rhs, Immediate(-1)); + __ Tst(mask, rhs); + __ JumpIf(ne, &rhs_not_power_of_2); - // {right} is power of 2. 
- __ And(out, mask, left); + // {rhs} is power of 2. + __ And(out, mask, lhs); __ Jump(*done); - __ bind(&right_not_power_of_2); + __ bind(&rhs_not_power_of_2); // We store the result of the Udiv in a temporary register in case {out} is - // the same as {left} or {right}: we'll still need those 2 registers intact to + // the same as {lhs} or {rhs}: we'll still need those 2 registers intact to // get the remainder. Register res = mask; - __ Udiv(res, left, right); - __ Msub(out, res, right, left); + __ Udiv(res, lhs, rhs); + __ Msub(out, res, rhs, lhs); __ bind(*done); } diff --git a/src/maglev/maglev-graph-builder.cc b/src/maglev/maglev-graph-builder.cc index 224dfa80ef..ee389cbafe 100644 --- a/src/maglev/maglev-graph-builder.cc +++ b/src/maglev/maglev-graph-builder.cc @@ -516,9 +516,11 @@ ValueNode* MaglevGraphBuilder::TryFoldInt32BinaryOperation(ValueNode* left, ValueNode* right) { switch (kOperation) { case Operation::kModulus: - // x % x = 0 - if (right == left) return GetInt32Constant(0); - break; + // Note the `x % x = 0` fold is invalid since for negative x values the + // result is -0.0. + // TODO(v8:7700): Consider re-enabling this fold if the result is used + // only in contexts where -0.0 is semantically equivalent to 0.0, or if x + // is known to be non-negative. default: // TODO(victorgomes): Implement more folds. break; @@ -531,13 +533,14 @@ ValueNode* MaglevGraphBuilder::TryFoldInt32BinaryOperation(ValueNode* left, int right) { switch (kOperation) { case Operation::kModulus: - // x % 1 = 0 - // x % -1 = 0 - if (right == 1 || right == -1) return GetInt32Constant(0); + // Note the `x % 1 = 0` and `x % -1 = 0` folds are invalid since for + // negative x values the result is -0.0. + // TODO(v8:7700): Consider re-enabling this fold if the result is used + // only in contexts where -0.0 is semantically equivalent to 0.0, or if x + // is known to be non-negative. 
// TODO(victorgomes): We can emit faster mod operation if {right} is power // of 2, unfortunately we need to know if {left} is negative or not. // Maybe emit a Int32ModulusRightIsPowerOf2? - break; default: // TODO(victorgomes): Implement more folds. break; diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index eddfefb1fe..8ff4993ac6 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -1432,12 +1432,16 @@ void Int32ModulusWithOverflow::SetValueLocationConstraints() { void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { - // Using same algorithm as in EffectControlLinearizer: + // If AreAliased(lhs, rhs): + // deopt if lhs < 0 // Minus zero. + // 0 + // + // Otherwise, use the same algorithm as in EffectControlLinearizer: // if rhs <= 0 then // rhs = -rhs // deopt if rhs == 0 // if lhs < 0 then - // let lhs_abs = -lsh in + // let lhs_abs = -lhs in // let res = lhs_abs % rhs in // deopt if res == 0 // -res @@ -1450,57 +1454,77 @@ void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm, DCHECK(general_temporaries().has(rax)); DCHECK(general_temporaries().has(rdx)); - Register left = ToRegister(left_input()); - Register right = ToRegister(right_input()); + Register lhs = ToRegister(left_input()); + Register rhs = ToRegister(right_input()); + + static constexpr DeoptimizeReason deopt_reason = + DeoptimizeReason::kDivisionByZero; + + if (lhs == rhs) { + // For the modulus algorithm described above, lhs and rhs must not alias + // each other. + __ testl(lhs, lhs); + // TODO(victorgomes): This ideally should be kMinusZero, but Maglev only + // allows one deopt reason per IR. 
+ __ EmitEagerDeoptIf(negative, deopt_reason, this); + __ Move(ToRegister(result()), 0); + return; + } + + DCHECK(!AreAliased(lhs, rhs, rax, rdx)); ZoneLabelRef done(masm); ZoneLabelRef rhs_checked(masm); - __ cmpl(right, Immediate(0)); + __ cmpl(rhs, Immediate(0)); __ JumpToDeferredIf( less_equal, - [](MaglevAssembler* masm, ZoneLabelRef rhs_checked, Register right, + [](MaglevAssembler* masm, ZoneLabelRef rhs_checked, Register rhs, Int32ModulusWithOverflow* node) { - __ negl(right); - __ EmitEagerDeoptIf(zero, DeoptimizeReason::kDivisionByZero, node); + __ negl(rhs); + __ EmitEagerDeoptIf(zero, deopt_reason, node); __ jmp(*rhs_checked); }, - rhs_checked, right, this); + rhs_checked, rhs, this); __ bind(*rhs_checked); - __ cmpl(left, Immediate(0)); + __ cmpl(lhs, Immediate(0)); __ JumpToDeferredIf( less, - [](MaglevAssembler* masm, ZoneLabelRef done, Register left, - Register right, Int32ModulusWithOverflow* node) { - __ movl(rax, left); + [](MaglevAssembler* masm, ZoneLabelRef done, Register lhs, Register rhs, + Int32ModulusWithOverflow* node) { + // `divl(divisor)` divides rdx:rax by the divisor and stores the + // quotient in rax, the remainder in rdx. + __ movl(rax, lhs); __ negl(rax); __ xorl(rdx, rdx); - __ divl(right); + __ divl(rhs); __ testl(rdx, rdx); // TODO(victorgomes): This ideally should be kMinusZero, but Maglev only // allows one deopt reason per IR. - __ EmitEagerDeoptIf(equal, DeoptimizeReason::kDivisionByZero, node); + __ EmitEagerDeoptIf(equal, deopt_reason, node); __ negl(rdx); __ jmp(*done); }, - done, left, right, this); + done, lhs, rhs, this); - Label right_not_power_of_2; + Label rhs_not_power_of_2; Register mask = rax; - __ leal(mask, Operand(right, -1)); - __ testl(right, mask); - __ j(not_zero, &right_not_power_of_2, Label::kNear); + __ leal(mask, Operand(rhs, -1)); + __ testl(rhs, mask); + __ j(not_zero, &rhs_not_power_of_2, Label::kNear); - // {right} is power of 2. - __ andl(mask, left); + // {rhs} is power of 2. 
+ __ andl(mask, lhs); __ movl(ToRegister(result()), mask); __ jmp(*done, Label::kNear); - __ bind(&right_not_power_of_2); - __ movl(rax, left); + __ bind(&rhs_not_power_of_2); + // `divl(divisor)` divides rdx:rax by the divisor and stores the + // quotient in rax, the remainder in rdx. + __ movl(rax, lhs); __ xorl(rdx, rdx); - __ divl(right); + __ divl(rhs); // Result is implicitly written to rdx. DCHECK_EQ(ToRegister(result()), rdx); diff --git a/test/mjsunit/maglev/regress/regress-1403575.js b/test/mjsunit/maglev/regress/regress-1403575.js new file mode 100644 index 0000000000..f1ae351baf --- /dev/null +++ b/test/mjsunit/maglev/regress/regress-1403575.js @@ -0,0 +1,14 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// Flags: --allow-natives-syntax + +function f(y) { + const x = y % y; + return 1 / x; +} +%PrepareFunctionForOptimization(f); +assertEquals(f(2), Infinity); +%OptimizeMaglevOnNextCall(f); +assertEquals(f(-2), -Infinity); From d8aeae69dfbdca966577bd0a3816d16e93409835 Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Thu, 29 Dec 2022 19:09:18 -0800 Subject: [PATCH 089/654] Update V8 DEPS (trusted) Rolling v8/build: https://chromium.googlesource.com/chromium/src/build/+log/d9fb377..83df13d Rolling v8/buildtools: https://chromium.googlesource.com/chromium/src/buildtools/+log/134af4c..cf8d11e Rolling v8/third_party/catapult: https://chromium.googlesource.com/catapult/+log/fa82fd2..a404e6d Rolling v8/third_party/fuchsia-sdk/sdk: version:11.20221228.1.1..version:11.20221229.3.1 Change-Id: I15521073cf263003381aed151b4e48a77e7dc86e Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4129163 Bot-Commit: v8-ci-autoroll-builder Commit-Queue: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#85040} --- DEPS | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/DEPS b/DEPS index 
e4b98e0122..489c2c0e39 100644 --- a/DEPS +++ b/DEPS @@ -65,7 +65,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. - 'fuchsia_version': 'version:11.20221228.1.1', + 'fuchsia_version': 'version:11.20221229.3.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -105,9 +105,9 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + 'd9fb377a525169334fb2aa7970f111d672bab811', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '83df13d86da5d86d4f6cc6c00600e9800f97f269', 'buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '134af4c91bb9ab46fe1165ff1cf0f76900fa5a7e', + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'cf8d11e41138ca777f3eaa09df41bf968c8be6ba', 'buildtools/clang_format/script': Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '8b525d2747f2584fc35d8c7e612e66f377858df7', 'buildtools/linux64': { @@ -209,7 +209,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + 'fa82fd2fd92b7482e28a8257c3cc79d99e0897ff', + 'url': Var('chromium_url') + '/catapult.git' + '@' + 'a404e6d108a230c0c4080a71705c6e6d7c30557a', 'condition': 'checkout_android', }, 'third_party/colorama/src': { From 4f7a03ca5bf5d747b4c85af1490b7cfb84f44563 Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Fri, 30 Dec 2022 19:10:29 -0800 Subject: [PATCH 090/654] Update V8 DEPS (trusted) Rolling v8/build: https://chromium.googlesource.com/chromium/src/build/+log/83df13d..d60073f Rolling v8/third_party/fuchsia-sdk/sdk: version:11.20221229.3.1..version:11.20221230.3.1 Change-Id: 
If6dca69c7f4f975d512a8b4253861b7808db5945 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4129170 Commit-Queue: v8-ci-autoroll-builder Bot-Commit: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#85041} --- DEPS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/DEPS b/DEPS index 489c2c0e39..5287c3c975 100644 --- a/DEPS +++ b/DEPS @@ -65,7 +65,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. - 'fuchsia_version': 'version:11.20221229.3.1', + 'fuchsia_version': 'version:11.20221230.3.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -105,7 +105,7 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '83df13d86da5d86d4f6cc6c00600e9800f97f269', + Var('chromium_url') + '/chromium/src/build.git' + '@' + 'd60073f94980040a12633463cba8486d915b5eb2', 'buildtools': Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'cf8d11e41138ca777f3eaa09df41bf968c8be6ba', 'buildtools/clang_format/script': From bd708c27bba9f1690eb18866325145058657ede1 Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Sat, 31 Dec 2022 19:26:59 -0800 Subject: [PATCH 091/654] Update V8 DEPS (trusted) Rolling v8/build: https://chromium.googlesource.com/chromium/src/build/+log/d60073f..e8a1124 Rolling v8/third_party/fuchsia-sdk/sdk: version:11.20221230.3.1..version:11.20221231.3.1 Change-Id: I4c7a1d8fd5420cad3b704364aae703a1b105e889 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4127145 Commit-Queue: v8-ci-autoroll-builder Bot-Commit: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#85042} --- DEPS | 4 ++-- 1 file changed, 2 insertions(+), 2 
deletions(-) diff --git a/DEPS b/DEPS index 5287c3c975..ce3564e2ee 100644 --- a/DEPS +++ b/DEPS @@ -65,7 +65,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. - 'fuchsia_version': 'version:11.20221230.3.1', + 'fuchsia_version': 'version:11.20221231.3.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -105,7 +105,7 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + 'd60073f94980040a12633463cba8486d915b5eb2', + Var('chromium_url') + '/chromium/src/build.git' + '@' + 'e8a1124e6d9553b9b90fa1d1ae03bb45cf65ea54', 'buildtools': Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'cf8d11e41138ca777f3eaa09df41bf968c8be6ba', 'buildtools/clang_format/script': From 1f9ff9cef4484b5a977c71cbe7162981258e5c3d Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Sun, 1 Jan 2023 19:09:45 -0800 Subject: [PATCH 092/654] Update V8 DEPS (trusted) Rolling v8/build: https://chromium.googlesource.com/chromium/src/build/+log/e8a1124..7ba88a0 Rolling v8/third_party/fuchsia-sdk/sdk: version:11.20221231.3.1..version:11.20230101.3.1 Change-Id: Ie9d8ce221aee75e6d578dacaa6fbbecb63e26286 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4127151 Commit-Queue: v8-ci-autoroll-builder Bot-Commit: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#85043} --- DEPS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/DEPS b/DEPS index ce3564e2ee..2ed57ed318 100644 --- a/DEPS +++ b/DEPS @@ -65,7 +65,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. 
- 'fuchsia_version': 'version:11.20221231.3.1', + 'fuchsia_version': 'version:11.20230101.3.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -105,7 +105,7 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + 'e8a1124e6d9553b9b90fa1d1ae03bb45cf65ea54', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '7ba88a00bd9bce363068803418a79da44f4fa0db', 'buildtools': Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'cf8d11e41138ca777f3eaa09df41bf968c8be6ba', 'buildtools/clang_format/script': From e490cf4a72ffdbbcc7484d3f35a8f056059fc6e1 Mon Sep 17 00:00:00 2001 From: Darius M Date: Thu, 29 Dec 2022 18:15:29 +0100 Subject: [PATCH 093/654] [arm64] Make CheckPageFlag behave as on x64/ia32/arm The condition to pass to CheckPageFlag was inverted on arm64 compared to x64/ia32/arm. This caused a bug in Maglev (fixed in https://crrev.com/c/4128556), and seems like it could easily cause other bugs in the future. With this CL, CheckPageFlag now behaves similarly on arm64 and x64/ia32/arm. 
Change-Id: Ib6b7f157db08d2e771ceb450ab16344c3578c546 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128518 Reviewed-by: Jakob Linke Reviewed-by: Andreas Haas Reviewed-by: Anton Bikineev Commit-Queue: Darius Mercadier Cr-Commit-Position: refs/heads/main@{#85044} --- src/codegen/arm64/macro-assembler-arm64.cc | 8 ++++---- src/compiler/backend/arm64/code-generator-arm64.cc | 6 +++--- src/maglev/arm64/maglev-ir-arm64.cc | 8 ++++---- src/wasm/baseline/arm64/liftoff-assembler-arm64.h | 4 ++-- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/src/codegen/arm64/macro-assembler-arm64.cc b/src/codegen/arm64/macro-assembler-arm64.cc index 91f43d69ac..5f64400739 100644 --- a/src/codegen/arm64/macro-assembler-arm64.cc +++ b/src/codegen/arm64/macro-assembler-arm64.cc @@ -3252,10 +3252,10 @@ void TurboAssembler::CheckPageFlag(const Register& object, int mask, Register scratch = temps.AcquireX(); And(scratch, object, ~kPageAlignmentMask); Ldr(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset)); - if (cc == eq) { + if (cc == ne) { TestAndBranchIfAnySet(scratch, mask, condition_met); } else { - DCHECK_EQ(cc, ne); + DCHECK_EQ(cc, eq); TestAndBranchIfAllClear(scratch, mask, condition_met); } } @@ -3509,9 +3509,9 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, } CheckPageFlag(value, MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, - ne, &done); + eq, &done); - CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, ne, + CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done); // Record the actual write. 
diff --git a/src/compiler/backend/arm64/code-generator-arm64.cc b/src/compiler/backend/arm64/code-generator-arm64.cc index c4488452f3..5db2b14a70 100644 --- a/src/compiler/backend/arm64/code-generator-arm64.cc +++ b/src/compiler/backend/arm64/code-generator-arm64.cc @@ -287,7 +287,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode { } __ CheckPageFlag( value_, MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, - ne, exit()); + eq, exit()); SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters() ? SaveFPRegsMode::kSave : SaveFPRegsMode::kIgnore; @@ -956,7 +956,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ JumpIfSmi(value, ool->exit()); } __ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, - eq, ool->entry()); + ne, ool->entry()); __ Bind(ool->exit()); break; } @@ -975,7 +975,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ JumpIfSmi(value, ool->exit()); } __ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, - eq, ool->entry()); + ne, ool->entry()); __ Bind(ool->exit()); break; } diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index 0e7f3c0a01..58903de599 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -1407,7 +1407,7 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm, ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path"); __ CheckPageFlag( value, - MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, ne, + MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, eq, *done); Register slot_reg = WriteBarrierDescriptor::SlotAddressRegister(); @@ -1433,7 +1433,7 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm, // Consider hoisting the check out of the loop and duplicating the loop into // with and without write barrier. 
__ CheckPageFlag(array, MemoryChunk::kPointersFromHereAreInterestingMask, - eq, &deferred_write_barrier->deferred_code_label); + ne, &deferred_write_barrier->deferred_code_label); __ bind(*done); } @@ -1453,7 +1453,7 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm, // as the first bailout. __ CheckPageFlag( context, - MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, ne, + MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, eq, *done); __ Move(WriteBarrierDescriptor::ObjectRegister(), generator); @@ -1479,7 +1479,7 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm, context, FieldMemOperand(generator, JSGeneratorObject::kContextOffset)); __ AssertNotSmi(context); __ CheckPageFlag(generator, MemoryChunk::kPointersFromHereAreInterestingMask, - eq, &deferred_context_write_barrier->deferred_code_label); + ne, &deferred_context_write_barrier->deferred_code_label); __ bind(*done); UseScratchRegisterScope temps(masm); diff --git a/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/src/wasm/baseline/arm64/liftoff-assembler-arm64.h index 2a988554a6..865edfb89b 100644 --- a/src/wasm/baseline/arm64/liftoff-assembler-arm64.h +++ b/src/wasm/baseline/arm64/liftoff-assembler-arm64.h @@ -521,7 +521,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr, // The write barrier. 
Label write_barrier; Label exit; - CheckPageFlag(dst_addr, MemoryChunk::kPointersFromHereAreInterestingMask, eq, + CheckPageFlag(dst_addr, MemoryChunk::kPointersFromHereAreInterestingMask, ne, &write_barrier); b(&exit); bind(&write_barrier); @@ -531,7 +531,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr, } CheckPageFlag(src.gp(), MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, - ne, &exit); + eq, &exit); CallRecordWriteStubSaveRegisters(dst_addr, offset_op, SaveFPRegsMode::kSave, StubCallMode::kCallWasmRuntimeStub); bind(&exit); From d019c8367ea7efeb3d7677347b23e4d172a799f2 Mon Sep 17 00:00:00 2001 From: Darius M Date: Wed, 28 Dec 2022 10:21:33 +0100 Subject: [PATCH 094/654] [maglev][arm64] Implement Float64Ieee754Unary node Bug: v8:7700 Change-Id: I509e4cbd55a60dc0bbd809e35e563dde92278714 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128097 Reviewed-by: Jakob Linke Auto-Submit: Darius Mercadier Commit-Queue: Jakob Linke Cr-Commit-Position: refs/heads/main@{#85045} --- src/maglev/arm64/maglev-ir-arm64.cc | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index 58903de599..9850419f67 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -118,7 +118,6 @@ void Int32DecrementWithOverflow::GenerateCode(MaglevAssembler* masm, __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this); } -UNIMPLEMENTED_NODE_WITH_CALL(Float64Ieee754Unary) UNIMPLEMENTED_NODE_WITH_CALL(ConvertReceiver, mode_) UNIMPLEMENTED_NODE(LoadSignedIntDataViewElement, type_) UNIMPLEMENTED_NODE(LoadDoubleDataViewElement) @@ -1170,6 +1169,17 @@ void Float64Exponentiate::GenerateCode(MaglevAssembler* masm, __ CallCFunction(ExternalReference::ieee754_pow_function(), 2); } +int Float64Ieee754Unary::MaxCallStackArgs() const { return 0; } +void Float64Ieee754Unary::SetValueLocationConstraints() { + UseFixed(input(), v0); 
+ DefineSameAsFirst(this); +} +void Float64Ieee754Unary::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction(ieee_function_, 1); +} + template void Float64CompareNode::SetValueLocationConstraints() { UseRegister(left_input()); From fede2e9dcbb33e976e0d7c0730c45d232326bdfa Mon Sep 17 00:00:00 2001 From: Darius M Date: Mon, 2 Jan 2023 08:27:03 +0100 Subject: [PATCH 095/654] [maglev][arm64] Add StoreTaggedFieldWithWriteBarrier IR Bug: v8:7700 Change-Id: I64801703a23ea4c2485aacb6ae907259fff73990 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128519 Reviewed-by: Victor Gomes Commit-Queue: Darius Mercadier Cr-Commit-Position: refs/heads/main@{#85046} --- src/maglev/arm64/maglev-ir-arm64.cc | 57 ++++++++++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index 9850419f67..f4dc142fa1 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -139,7 +139,6 @@ UNIMPLEMENTED_NODE_WITH_CALL(StoreMap) UNIMPLEMENTED_NODE(StoreDoubleField) UNIMPLEMENTED_NODE(StoreSignedIntDataViewElement, type_) UNIMPLEMENTED_NODE(StoreDoubleDataViewElement) -UNIMPLEMENTED_NODE_WITH_CALL(StoreTaggedFieldWithWriteBarrier) int BuiltinStringFromCharCode::MaxCallStackArgs() const { return AllocateDescriptor::GetStackParameterCount(); @@ -1641,6 +1640,62 @@ void LoadDoubleElement::GenerateCode(MaglevAssembler* masm, FieldMemOperand(elements, FixedArray::kHeaderSize)); } +int StoreTaggedFieldWithWriteBarrier::MaxCallStackArgs() const { + return WriteBarrierDescriptor::GetStackParameterCount(); +} +void StoreTaggedFieldWithWriteBarrier::SetValueLocationConstraints() { + UseFixed(object_input(), WriteBarrierDescriptor::ObjectRegister()); + UseRegister(value_input()); +} +void StoreTaggedFieldWithWriteBarrier::GenerateCode( + MaglevAssembler* masm, const 
ProcessingState& state) { + // TODO(leszeks): Consider making this an arbitrary register and push/popping + // in the deferred path. + Register object = WriteBarrierDescriptor::ObjectRegister(); + DCHECK_EQ(object, ToRegister(object_input())); + + Register value = ToRegister(value_input()); + + __ AssertNotSmi(object); + __ StoreTaggedField(FieldMemOperand(object, offset()), value); + + ZoneLabelRef done(masm); + DeferredCodeInfo* deferred_write_barrier = __ PushDeferredCode( + [](MaglevAssembler* masm, ZoneLabelRef done, Register value, + Register object, StoreTaggedFieldWithWriteBarrier* node) { + ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path"); + __ CheckPageFlag( + value, MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, + eq, *done); + + Register slot_reg = WriteBarrierDescriptor::SlotAddressRegister(); + RegList saved; + if (node->register_snapshot().live_registers.has(slot_reg)) { + saved.set(slot_reg); + } + + __ PushAll(saved); + __ Add(slot_reg, object, node->offset() - kHeapObjectTag); + + SaveFPRegsMode const save_fp_mode = + !node->register_snapshot().live_double_registers.is_empty() + ? SaveFPRegsMode::kSave + : SaveFPRegsMode::kIgnore; + + __ CallRecordWriteStub(object, slot_reg, save_fp_mode); + + __ PopAll(saved); + __ B(*done); + }, + done, value, object, this); + + __ JumpIfSmi(value, *done); + __ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, ne, + &deferred_write_barrier->deferred_code_label); + + __ bind(*done); +} + void StringLength::SetValueLocationConstraints() { UseRegister(object_input()); DefineAsRegister(this); From cc6fd8c8c0587e3e79db71c388fe23ec8c5068d8 Mon Sep 17 00:00:00 2001 From: Andreas Haas Date: Mon, 2 Jan 2023 10:16:07 +0100 Subject: [PATCH 096/654] [d8] Check profile for nullptr before serializing it `CpuProfiler::StopProfiling()` return `nullptr` if no profiling is active. 
Thus a `nullptr` check is needed before serializing the profile returned by `CpuProfiler::StopProfiling()`. R=clemensb@chromium.org Bug: chromium:1394663 Change-Id: I364eeb1d9bd670de5179e242c4462f0fbfc9c607 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4126234 Reviewed-by: Clemens Backes Commit-Queue: Andreas Haas Cr-Commit-Position: refs/heads/main@{#85047} --- src/d8/d8-console.cc | 1 + test/mjsunit/regress/regress-1394663.js | 7 +++++++ 2 files changed, 8 insertions(+) create mode 100644 test/mjsunit/regress/regress-1394663.js diff --git a/src/d8/d8-console.cc b/src/d8/d8-console.cc index 2adccc4763..a4d65ba61c 100644 --- a/src/d8/d8-console.cc +++ b/src/d8/d8-console.cc @@ -140,6 +140,7 @@ void D8Console::ProfileEnd(const debug::ConsoleCallArguments& args, if (!profiler_) return; CpuProfile* profile = profiler_->StopProfiling(String::Empty(isolate_)); profiler_active_ = false; + if (!profile) return; if (Shell::HasOnProfileEndListener(isolate_)) { StringOutputStream out; profile->Serialize(&out); diff --git a/test/mjsunit/regress/regress-1394663.js b/test/mjsunit/regress/regress-1394663.js new file mode 100644 index 0000000000..95489d6f4f --- /dev/null +++ b/test/mjsunit/regress/regress-1394663.js @@ -0,0 +1,7 @@ +// Copyright 2023 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +console.profile(); +console.profileEnd(); +console.profileEnd(); From bfa76578c86cc4d480abbc51d4905e73842c9f1a Mon Sep 17 00:00:00 2001 From: Clemens Backes Date: Fri, 23 Dec 2022 14:34:48 +0100 Subject: [PATCH 097/654] [wasm] Simplify {RemoveCompiledCode} Inline {GetCodeLocked} and {ResetCodeLocked} to make the code simpler and more efficient. Drive-by: Make {FindJumpTablesForRegionLocked} private. 
R=ahaas@chromium.org Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_rel Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_isolates_rel Change-Id: Id0649924440737f3a04dbb536bd2141732f4f3bb Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4123535 Commit-Queue: Clemens Backes Reviewed-by: Andreas Haas Cr-Commit-Position: refs/heads/main@{#85048} --- src/wasm/wasm-code-manager.cc | 31 ++++++++----------------------- src/wasm/wasm-code-manager.h | 14 ++++++-------- 2 files changed, 14 insertions(+), 31 deletions(-) diff --git a/src/wasm/wasm-code-manager.cc b/src/wasm/wasm-code-manager.cc index fb809fc911..55670172aa 100644 --- a/src/wasm/wasm-code-manager.cc +++ b/src/wasm/wasm-code-manager.cc @@ -1471,29 +1471,11 @@ std::vector NativeModule::SnapshotAllOwnedCode() const { WasmCode* NativeModule::GetCode(uint32_t index) const { base::RecursiveMutexGuard guard(&allocation_mutex_); - return GetCodeLocked(index); -} - -WasmCode* NativeModule::GetCodeLocked(uint32_t index) const { - allocation_mutex_.AssertHeld(); WasmCode* code = code_table_[declared_function_index(module(), index)]; if (code) WasmCodeRefScope::AddRef(code); return code; } -void NativeModule::ResetCodeLocked(uint32_t index) const { - allocation_mutex_.AssertHeld(); - int declared_index = declared_function_index(module(), index); - WasmCode* code = code_table_[declared_index]; - if (!code) return; - - WasmCodeRefScope::AddRef(code); - code_table_[declared_index] = nullptr; - // The code is added to the current {WasmCodeRefScope}, hence the ref - // count cannot drop to zero here. 
- code->DecRefOnLiveCode(); -} - bool NativeModule::HasCode(uint32_t index) const { base::RecursiveMutexGuard guard(&allocation_mutex_); return code_table_[declared_function_index(module(), index)] != nullptr; @@ -2439,7 +2421,6 @@ void NativeModule::SetDebugState(DebugState new_debug_state) { namespace { bool ShouldRemoveCode(WasmCode* code, NativeModule::RemoveFilter filter) { - if (!code) return false; if (filter == NativeModule::RemoveFilter::kRemoveDebugCode && !code->for_debugging()) { return false; @@ -2459,10 +2440,14 @@ void NativeModule::RemoveCompiledCode(RemoveFilter filter) { CodeSpaceWriteScope write_scope(this); base::RecursiveMutexGuard guard(&allocation_mutex_); for (uint32_t i = 0; i < num_functions; i++) { - uint32_t func_index = i + num_imports; - WasmCode* code = GetCodeLocked(func_index); - if (ShouldRemoveCode(code, filter)) { - ResetCodeLocked(func_index); + WasmCode* code = code_table_[i]; + if (code && ShouldRemoveCode(code, filter)) { + code_table_[i] = nullptr; + // Add the code to the {WasmCodeRefScope}, so the ref count cannot drop to + // zero here. It might in the {WasmCodeRefScope} destructor, though. 
+ WasmCodeRefScope::AddRef(code); + code->DecRefOnLiveCode(); + uint32_t func_index = i + num_imports; UseLazyStubLocked(func_index); } } diff --git a/src/wasm/wasm-code-manager.h b/src/wasm/wasm-code-manager.h index c4d9e18957..f207ca096c 100644 --- a/src/wasm/wasm-code-manager.h +++ b/src/wasm/wasm-code-manager.h @@ -676,10 +676,8 @@ class V8_EXPORT_PRIVATE NativeModule final { std::vector SnapshotAllOwnedCode() const; WasmCode* GetCode(uint32_t index) const; - WasmCode* GetCodeLocked(uint32_t index) const; bool HasCode(uint32_t index) const; bool HasCodeWithTier(uint32_t index, ExecutionTier tier) const; - void ResetCodeLocked(uint32_t index) const; void SetWasmSourceMap(std::unique_ptr source_map); WasmModuleSourceMap* GetWasmSourceMap() const; @@ -689,12 +687,6 @@ class V8_EXPORT_PRIVATE NativeModule final { : kNullAddress; } - // Finds the jump tables that should be used for given code region. This - // information is then passed to {GetNearCallTargetForFunction} and - // {GetNearRuntimeStubEntry} to avoid the overhead of looking this information - // up there. Return an empty struct if no suitable jump tables exist. - JumpTablesRef FindJumpTablesForRegionLocked(base::AddressRegion) const; - // Get the call target in the jump table previously looked up via // {FindJumpTablesForRegionLocked}. Address GetNearCallTargetForFunction(uint32_t func_index, @@ -898,6 +890,12 @@ class V8_EXPORT_PRIVATE NativeModule final { WasmCode* CreateEmptyJumpTableInRegionLocked(int jump_table_size, base::AddressRegion); + // Finds the jump tables that should be used for given code region. This + // information is then passed to {GetNearCallTargetForFunction} and + // {GetNearRuntimeStubEntry} to avoid the overhead of looking this information + // up there. Return an empty struct if no suitable jump tables exist. 
+ JumpTablesRef FindJumpTablesForRegionLocked(base::AddressRegion) const; + void UpdateCodeSize(size_t, ExecutionTier, ForDebugging); // Hold the {allocation_mutex_} when calling one of these methods. From 848bd2eb506dc34893ff4dab955fa19cdfd6b24d Mon Sep 17 00:00:00 2001 From: Darius M Date: Thu, 29 Dec 2022 16:12:06 +0100 Subject: [PATCH 098/654] [maglev] Fix missing sign-extension in Switch TurboAssembler::Switch considers its "condition" input is 64-bit. However, Maglev's inner integers are rather Smis/32-bit integers. Because we didn't not sign-extend the condition before calling TurboAssembler::Switch, negative values were treated as positive rather than negative. Fixed: chromium:1403749 Bug: v8:7700 Change-Id: I78e934045330012186dc83dea0dc620ec977380a Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128080 Reviewed-by: Jakob Linke Commit-Queue: Darius Mercadier Cr-Commit-Position: refs/heads/main@{#85049} --- src/codegen/arm64/macro-assembler-arm64.h | 4 ++- src/codegen/x64/macro-assembler-x64.h | 2 ++ src/maglev/arm64/maglev-assembler-arm64-inl.h | 4 +++ src/maglev/maglev-assembler.h | 2 ++ src/maglev/maglev-ir.cc | 7 +++++- src/maglev/x64/maglev-assembler-x64-inl.h | 4 +++ .../maglev/regress/regress-crbug-1403749.js | 25 +++++++++++++++++++ 7 files changed, 46 insertions(+), 2 deletions(-) create mode 100644 test/mjsunit/maglev/regress/regress-crbug-1403749.js diff --git a/src/codegen/arm64/macro-assembler-arm64.h b/src/codegen/arm64/macro-assembler-arm64.h index a4c7f390f7..52639a0096 100644 --- a/src/codegen/arm64/macro-assembler-arm64.h +++ b/src/codegen/arm64/macro-assembler-arm64.h @@ -788,9 +788,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { #define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \ inline void FN(const REGTYPE REG, const MemOperand& addr); - LS_MACRO_LIST(DECLARE_FUNCTION) + LS_MACRO_LIST(DECLARE_FUNCTION) #undef DECLARE_FUNCTION + // Caution: if {value} is a 32-bit negative int, it should be 
sign-extended + // to 64-bit before calling this function. void Switch(Register scratch, Register value, int case_value_base, Label** labels, int num_labels); diff --git a/src/codegen/x64/macro-assembler-x64.h b/src/codegen/x64/macro-assembler-x64.h index 0f1818c17a..6aba3d833c 100644 --- a/src/codegen/x64/macro-assembler-x64.h +++ b/src/codegen/x64/macro-assembler-x64.h @@ -282,6 +282,8 @@ class V8_EXPORT_PRIVATE TurboAssembler j(less, dest); } + // Caution: if {reg} is a 32-bit negative int, it should be sign-extended to + // 64-bit before calling this function. void Switch(Register scrach, Register reg, int case_base_value, Label** labels, int num_labels); diff --git a/src/maglev/arm64/maglev-assembler-arm64-inl.h b/src/maglev/arm64/maglev-assembler-arm64-inl.h index 2b520431b6..8d91aabca4 100644 --- a/src/maglev/arm64/maglev-assembler-arm64-inl.h +++ b/src/maglev/arm64/maglev-assembler-arm64-inl.h @@ -348,6 +348,10 @@ inline void MaglevAssembler::Move(Register dst, Handle obj) { Mov(dst, Operand(obj)); } +inline void MaglevAssembler::SignExtend32To64Bits(Register dst, Register src) { + Mov(dst, Operand(src.W(), SXTW)); +} + inline void MaglevAssembler::CompareInt32(Register src1, Register src2) { Cmp(src1.W(), src2.W()); } diff --git a/src/maglev/maglev-assembler.h b/src/maglev/maglev-assembler.h index ae652b38f8..b21ff1d043 100644 --- a/src/maglev/maglev-assembler.h +++ b/src/maglev/maglev-assembler.h @@ -154,6 +154,8 @@ class MaglevAssembler : public MacroAssembler { inline void Move(DoubleRegister dst, double n); inline void Move(Register dst, Handle obj); + inline void SignExtend32To64Bits(Register dst, Register src); + inline void CompareInt32(Register src1, Register src2); inline void Jump(Label* target); diff --git a/src/maglev/maglev-ir.cc b/src/maglev/maglev-ir.cc index 4bf0d5cce3..21067a195d 100644 --- a/src/maglev/maglev-ir.cc +++ b/src/maglev/maglev-ir.cc @@ -2278,7 +2278,12 @@ void Switch::GenerateCode(MaglevAssembler* masm, const ProcessingState& 
state) { block->set_start_block_of_switch_case(true); labels[i] = block->label(); } - __ Switch(scratch, ToRegister(value()), value_base(), labels.get(), size()); + Register val = ToRegister(value()); + // Switch requires {val} (the switch's condition) to be 64-bit, but maglev + // usually manipulates/creates 32-bit integers. We thus sign-extend {val} to + // 64-bit to have the correct value for negative numbers. + __ SignExtend32To64Bits(val, val); + __ Switch(scratch, val, value_base(), labels.get(), size()); if (has_fallthrough()) { DCHECK_EQ(fallthrough(), state.next_block()); } else { diff --git a/src/maglev/x64/maglev-assembler-x64-inl.h b/src/maglev/x64/maglev-assembler-x64-inl.h index 1419cd1998..6be20a939e 100644 --- a/src/maglev/x64/maglev-assembler-x64-inl.h +++ b/src/maglev/x64/maglev-assembler-x64-inl.h @@ -309,6 +309,10 @@ inline void MaglevAssembler::Move(Register dst, Handle obj) { MacroAssembler::Move(dst, obj); } +inline void MaglevAssembler::SignExtend32To64Bits(Register dst, Register src) { + movsxlq(dst, src); +} + inline void MaglevAssembler::CompareInt32(Register src1, Register src2) { cmpl(src1, src2); } diff --git a/test/mjsunit/maglev/regress/regress-crbug-1403749.js b/test/mjsunit/maglev/regress/regress-crbug-1403749.js new file mode 100644 index 0000000000..726cba89e8 --- /dev/null +++ b/test/mjsunit/maglev/regress/regress-crbug-1403749.js @@ -0,0 +1,25 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+// +// Flags: --maglev --allow-natives-syntax + +function f(x) { + let c = x | -6; + switch (c) { + case 0: + case 1: + case 2: + case 3: + case 4: + case -5: return 3; + } + return 0; +} + +%PrepareFunctionForOptimization(f); +assertEquals(0, f(-2147483648)); +assertEquals(3, f(-2127484783)); +%OptimizeMaglevOnNextCall(f); +assertEquals(0, f(-2147483648)); +assertEquals(3, f(-2127484783)); From a2633fe4c1a2672964eb462c57741ba46f3da50f Mon Sep 17 00:00:00 2001 From: Victor Gomes Date: Mon, 2 Jan 2023 12:24:09 +0100 Subject: [PATCH 099/654] [bazel] Add v8_disable_write_barriers to build config Which is needed to filter tests in the testrunner. Change-Id: I6ef24ac3e7a716329acb3e860872d5c83f7d84d4 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128521 Commit-Queue: Victor Gomes Commit-Queue: Andreas Haas Auto-Submit: Victor Gomes Reviewed-by: Andreas Haas Cr-Commit-Position: refs/heads/main@{#85050} --- bazel/defs.bzl | 1 + 1 file changed, 1 insertion(+) diff --git a/bazel/defs.bzl b/bazel/defs.bzl index 8c3e3b9d1d..870e7268df 100644 --- a/bazel/defs.bzl +++ b/bazel/defs.bzl @@ -533,6 +533,7 @@ def build_config_content(cpu, icu): ("v8_enable_single_generation", "false"), ("v8_enable_sandbox", "false"), ("v8_enable_shared_ro_heap", "false"), + ("v8_disable_write_barriers", "false"), ("v8_target_cpu", cpu), ]) From 58ae6e4a8153048307d8f3d47a0e46576c54c41b Mon Sep 17 00:00:00 2001 From: pthier Date: Mon, 2 Jan 2023 12:02:33 +0100 Subject: [PATCH 100/654] [regexp] Explicitly include ICUs utf16.h ... required for U16_NEXT in builds with ICU. 
Change-Id: I2ccda0c3fa7fd0139745e6233c6ab4a5dec46b50 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128520 Commit-Queue: Patrick Thier Reviewed-by: Victor Gomes Cr-Commit-Position: refs/heads/main@{#85051} --- src/regexp/regexp-parser.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/regexp/regexp-parser.cc b/src/regexp/regexp-parser.cc index f420b2eec4..f3f36bf167 100644 --- a/src/regexp/regexp-parser.cc +++ b/src/regexp/regexp-parser.cc @@ -20,6 +20,7 @@ #include "unicode/uniset.h" #include "unicode/unistr.h" #include "unicode/usetiter.h" +#include "unicode/utf16.h" // For U16_NEXT #endif // V8_INTL_SUPPORT namespace v8 { From 84e470845af0e64d00814d7a7604a5b447273f8f Mon Sep 17 00:00:00 2001 From: Andreas Haas Date: Mon, 2 Jan 2023 12:36:31 +0100 Subject: [PATCH 101/654] [wasm][capi] Optimize all functions before serialization The existing implementation of `serialize` in the C-API is to produce a snapshot of the current state of the `NativeModule`. However, so far all users of `serialize` did not care about the runtime of `serialize`, but cared about `deserialize` starting up fast. With this CL all functions of a module get tiered up to TurboFan before serializing the module. 
R=clemensb@chromium.org Change-Id: Icaef846e33509d90b38559c0b689f798d35a98db Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4129495 Commit-Queue: Andreas Haas Reviewed-by: Clemens Backes Cr-Commit-Position: refs/heads/main@{#85052} --- src/wasm/c-api.cc | 14 +++--- src/wasm/compilation-environment.h | 2 + src/wasm/function-compiler.cc | 4 +- src/wasm/function-compiler.h | 2 +- src/wasm/module-compiler.cc | 48 ++++++++++++++++++++- src/wasm/wasm-engine.cc | 5 ++- src/wasm/wasm-engine.h | 2 +- test/cctest/wasm/test-wasm-shared-engine.cc | 2 +- 8 files changed, 65 insertions(+), 14 deletions(-) diff --git a/src/wasm/c-api.cc b/src/wasm/c-api.cc index af8b6a555f..66e506830e 100644 --- a/src/wasm/c-api.cc +++ b/src/wasm/c-api.cc @@ -1179,13 +1179,13 @@ auto Module::exports() const -> ownvec { return ExportsImpl(impl(this)->v8_object()); } -// We serialize the state of the module when calling this method; an arbitrary -// number of functions can be tiered up to TurboFan, and only those will be -// serialized. -// The caller is responsible for "warming up" the module before serializing. +// We tier up all functions to TurboFan, and then serialize all TurboFan code. +// If no TurboFan code existed before calling this function, then the call to +// {serialize} may take a long time. auto Module::serialize() const -> vec { i::wasm::NativeModule* native_module = impl(this)->v8_object()->native_module(); + native_module->compilation_state()->TierUpAllFunctions(); v8::base::Vector wire_bytes = native_module->wire_bytes(); size_t binary_size = wire_bytes.size(); i::wasm::WasmSerializer serializer(native_module); @@ -1200,8 +1200,10 @@ auto Module::serialize() const -> vec { ptr += binary_size; if (!serializer.SerializeNativeModule( {reinterpret_cast(ptr), serial_size})) { - // Serialization failed, because no TurboFan code is present yet. In this - // case, the serialized module just contains the wire bytes. 
+ // Serialization fails if no TurboFan code is present. This may happen + // because the module does not have any functions, or because another thread + // modifies the {NativeModule} concurrently. In this case, the serialized + // module just contains the wire bytes. buffer = vec::make_uninitialized(size_size + binary_size); byte_t* ptr = buffer.get(); i::wasm::LEBHelper::write_u64v(reinterpret_cast(&ptr), diff --git a/src/wasm/compilation-environment.h b/src/wasm/compilation-environment.h index 60908bdae4..0e41ee1953 100644 --- a/src/wasm/compilation-environment.h +++ b/src/wasm/compilation-environment.h @@ -172,6 +172,8 @@ class V8_EXPORT_PRIVATE CompilationState { // Set a higher priority for the compilation job. void SetHighPriority(); + void TierUpAllFunctions(); + bool failed() const; bool baseline_compilation_finished() const; diff --git a/src/wasm/function-compiler.cc b/src/wasm/function-compiler.cc index 20828eb0dc..42039d75e2 100644 --- a/src/wasm/function-compiler.cc +++ b/src/wasm/function-compiler.cc @@ -158,7 +158,7 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation( } // static -void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate, +void WasmCompilationUnit::CompileWasmFunction(Counters* counters, NativeModule* native_module, WasmFeatures* detected, const WasmFunction* function, @@ -174,7 +174,7 @@ void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate, CompilationEnv env = native_module->CreateCompilationEnv(); WasmCompilationResult result = unit.ExecuteCompilation( &env, native_module->compilation_state()->GetWireBytesStorage().get(), - isolate->counters(), nullptr, detected); + counters, nullptr, detected); if (result.succeeded()) { WasmCodeRefScope code_ref_scope; native_module->PublishCode( diff --git a/src/wasm/function-compiler.h b/src/wasm/function-compiler.h index 5798982ad4..a10927ecf2 100644 --- a/src/wasm/function-compiler.h +++ b/src/wasm/function-compiler.h @@ -76,7 +76,7 @@ class 
V8_EXPORT_PRIVATE WasmCompilationUnit final { ForDebugging for_debugging() const { return for_debugging_; } int func_index() const { return func_index_; } - static void CompileWasmFunction(Isolate*, NativeModule*, + static void CompileWasmFunction(Counters*, NativeModule*, WasmFeatures* detected, const WasmFunction*, ExecutionTier); diff --git a/src/wasm/module-compiler.cc b/src/wasm/module-compiler.cc index 63468aee4b..94856a3938 100644 --- a/src/wasm/module-compiler.cc +++ b/src/wasm/module-compiler.cc @@ -631,6 +631,8 @@ class CompilationStateImpl { compile_job_->UpdatePriority(TaskPriority::kUserBlocking); } + void TierUpAllFunctions(); + bool failed() const { return compile_failed_.load(std::memory_order_relaxed); } @@ -836,6 +838,10 @@ void CompilationState::AddCallback( void CompilationState::SetHighPriority() { Impl(this)->SetHighPriority(); } +void CompilationState::TierUpAllFunctions() { + Impl(this)->TierUpAllFunctions(); +} + void CompilationState::InitializeAfterDeserialization( base::Vector lazy_functions, base::Vector eager_functions) { @@ -1410,7 +1416,8 @@ void TierUpNowForTesting(Isolate* isolate, WasmInstanceObject instance, TransitiveTypeFeedbackProcessor::Process(instance, func_index); } auto* native_module = instance.module_object().native_module(); - wasm::GetWasmEngine()->CompileFunction(isolate, native_module, func_index, + wasm::GetWasmEngine()->CompileFunction(isolate->counters(), native_module, + func_index, wasm::ExecutionTier::kTurbofan); CHECK(!native_module->compilation_state()->failed()); } @@ -3698,6 +3705,45 @@ void CompilationStateImpl::WaitForCompilationEvent( semaphore->Wait(); } +void CompilationStateImpl::TierUpAllFunctions() { + const WasmModule* module = native_module_->module(); + uint32_t num_wasm_functions = module->num_declared_functions; + WasmCodeRefScope code_ref_scope; + CompilationUnitBuilder builder(native_module_); + for (uint32_t i = 0; i < num_wasm_functions; ++i) { + int func_index = 
module->num_imported_functions + i; + WasmCode* code = native_module_->GetCode(func_index); + if (!code || !code->is_turbofan()) { + builder.AddTopTierUnit(func_index, ExecutionTier::kTurbofan); + } + } + builder.Commit(); + + // Join the compilation, until no compilation units are left anymore. + class DummyDelegate final : public JobDelegate { + bool ShouldYield() override { return false; } + bool IsJoiningThread() const override { return true; } + void NotifyConcurrencyIncrease() override { UNIMPLEMENTED(); } + uint8_t GetTaskId() override { return kMainTaskId; } + }; + + DummyDelegate delegate; + ExecuteCompilationUnits(native_module_weak_, async_counters_.get(), &delegate, + kBaselineOrTopTier); + + // We cannot wait for other compilation threads to finish, so we explicitly + // compile all functions which are not yet available as TurboFan code. + for (uint32_t i = 0; i < num_wasm_functions; ++i) { + uint32_t func_index = module->num_imported_functions + i; + WasmCode* code = native_module_->GetCode(func_index); + if (!code || !code->is_turbofan()) { + wasm::GetWasmEngine()->CompileFunction(async_counters_.get(), + native_module_, func_index, + wasm::ExecutionTier::kTurbofan); + } + } +} + namespace { using JSToWasmWrapperQueue = WrapperQueue>; diff --git a/src/wasm/wasm-engine.cc b/src/wasm/wasm-engine.cc index 1d230ca1f7..6bfecc3e24 100644 --- a/src/wasm/wasm-engine.cc +++ b/src/wasm/wasm-engine.cc @@ -705,12 +705,13 @@ std::shared_ptr WasmEngine::StartStreamingCompilation( isolate, enabled, context, api_method_name, std::move(resolver)); } -void WasmEngine::CompileFunction(Isolate* isolate, NativeModule* native_module, +void WasmEngine::CompileFunction(Counters* counters, + NativeModule* native_module, uint32_t function_index, ExecutionTier tier) { // Note we assume that "one-off" compilations can discard detected features. 
WasmFeatures detected = WasmFeatures::None(); WasmCompilationUnit::CompileWasmFunction( - isolate, native_module, &detected, + counters, native_module, &detected, &native_module->module()->functions[function_index], tier); } diff --git a/src/wasm/wasm-engine.h b/src/wasm/wasm-engine.h index 709443d09b..29a0fea4f3 100644 --- a/src/wasm/wasm-engine.h +++ b/src/wasm/wasm-engine.h @@ -202,7 +202,7 @@ class V8_EXPORT_PRIVATE WasmEngine { // Compiles the function with the given index at a specific compilation tier. // Errors are stored internally in the CompilationState. // This is mostly used for testing to force a function into a specific tier. - void CompileFunction(Isolate* isolate, NativeModule* native_module, + void CompileFunction(Counters* counters, NativeModule* native_module, uint32_t function_index, ExecutionTier tier); void EnterDebuggingForIsolate(Isolate* isolate); diff --git a/test/cctest/wasm/test-wasm-shared-engine.cc b/test/cctest/wasm/test-wasm-shared-engine.cc index 0c76e6d21f..2591cf92d1 100644 --- a/test/cctest/wasm/test-wasm-shared-engine.cc +++ b/test/cctest/wasm/test-wasm-shared-engine.cc @@ -301,7 +301,7 @@ TEST(SharedEngineRunThreadedTierUp) { Handle instance = isolate->ImportInstance(module); WasmFeatures detected = WasmFeatures::None(); WasmCompilationUnit::CompileWasmFunction( - isolate->isolate(), module.get(), &detected, + isolate->isolate()->counters(), module.get(), &detected, &module->module()->functions[0], ExecutionTier::kTurbofan); CHECK_EQ(23, isolate->Run(instance)); }); From f23a3ecccf46a50543a569ac67274766e97ba122 Mon Sep 17 00:00:00 2001 From: Leszek Swirski Date: Mon, 2 Jan 2023 13:57:49 +0000 Subject: [PATCH 102/654] Revert "[wasm][capi] Optimize all functions before serialization" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 84e470845af0e64d00814d7a7604a5b447273f8f. 
Reason for revert: Breaks TSAN stress-incremental-marking: https://ci.chromium.org/ui/p/v8/builders/ci/V8%20Linux64%20TSAN%20-%20stress-incremental-marking/10433/overview Original change's description: > [wasm][capi] Optimize all functions before serialization > > The existing implementation of `serialize` in the C-API is to produce > a snapshot of the current state of the `NativeModule`. However, so far > all users of `serialize` did not care about the runtime of `serialize`, > but cared about `deserialize` starting up fast. > > With this CL all functions of a module get tiered up to TurboFan before > serializing the module. > > R=​clemensb@chromium.org > > Change-Id: Icaef846e33509d90b38559c0b689f798d35a98db > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4129495 > Commit-Queue: Andreas Haas > Reviewed-by: Clemens Backes > Cr-Commit-Position: refs/heads/main@{#85052} Change-Id: Ie0758b32ef3469fe75d3a45bc3e6950b3b192edb No-Presubmit: true No-Tree-Checks: true No-Try: true Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4131634 Commit-Queue: Rubber Stamper Auto-Submit: Leszek Swirski Bot-Commit: Rubber Stamper Owners-Override: Leszek Swirski Cr-Commit-Position: refs/heads/main@{#85053} --- src/wasm/c-api.cc | 14 +++--- src/wasm/compilation-environment.h | 2 - src/wasm/function-compiler.cc | 4 +- src/wasm/function-compiler.h | 2 +- src/wasm/module-compiler.cc | 48 +-------------------- src/wasm/wasm-engine.cc | 5 +-- src/wasm/wasm-engine.h | 2 +- test/cctest/wasm/test-wasm-shared-engine.cc | 2 +- 8 files changed, 14 insertions(+), 65 deletions(-) diff --git a/src/wasm/c-api.cc b/src/wasm/c-api.cc index 66e506830e..af8b6a555f 100644 --- a/src/wasm/c-api.cc +++ b/src/wasm/c-api.cc @@ -1179,13 +1179,13 @@ auto Module::exports() const -> ownvec { return ExportsImpl(impl(this)->v8_object()); } -// We tier up all functions to TurboFan, and then serialize all TurboFan code. 
-// If no TurboFan code existed before calling this function, then the call to -// {serialize} may take a long time. +// We serialize the state of the module when calling this method; an arbitrary +// number of functions can be tiered up to TurboFan, and only those will be +// serialized. +// The caller is responsible for "warming up" the module before serializing. auto Module::serialize() const -> vec { i::wasm::NativeModule* native_module = impl(this)->v8_object()->native_module(); - native_module->compilation_state()->TierUpAllFunctions(); v8::base::Vector wire_bytes = native_module->wire_bytes(); size_t binary_size = wire_bytes.size(); i::wasm::WasmSerializer serializer(native_module); @@ -1200,10 +1200,8 @@ auto Module::serialize() const -> vec { ptr += binary_size; if (!serializer.SerializeNativeModule( {reinterpret_cast(ptr), serial_size})) { - // Serialization fails if no TurboFan code is present. This may happen - // because the module does not have any functions, or because another thread - // modifies the {NativeModule} concurrently. In this case, the serialized - // module just contains the wire bytes. + // Serialization failed, because no TurboFan code is present yet. In this + // case, the serialized module just contains the wire bytes. buffer = vec::make_uninitialized(size_size + binary_size); byte_t* ptr = buffer.get(); i::wasm::LEBHelper::write_u64v(reinterpret_cast(&ptr), diff --git a/src/wasm/compilation-environment.h b/src/wasm/compilation-environment.h index 0e41ee1953..60908bdae4 100644 --- a/src/wasm/compilation-environment.h +++ b/src/wasm/compilation-environment.h @@ -172,8 +172,6 @@ class V8_EXPORT_PRIVATE CompilationState { // Set a higher priority for the compilation job. 
void SetHighPriority(); - void TierUpAllFunctions(); - bool failed() const; bool baseline_compilation_finished() const; diff --git a/src/wasm/function-compiler.cc b/src/wasm/function-compiler.cc index 42039d75e2..20828eb0dc 100644 --- a/src/wasm/function-compiler.cc +++ b/src/wasm/function-compiler.cc @@ -158,7 +158,7 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation( } // static -void WasmCompilationUnit::CompileWasmFunction(Counters* counters, +void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate, NativeModule* native_module, WasmFeatures* detected, const WasmFunction* function, @@ -174,7 +174,7 @@ void WasmCompilationUnit::CompileWasmFunction(Counters* counters, CompilationEnv env = native_module->CreateCompilationEnv(); WasmCompilationResult result = unit.ExecuteCompilation( &env, native_module->compilation_state()->GetWireBytesStorage().get(), - counters, nullptr, detected); + isolate->counters(), nullptr, detected); if (result.succeeded()) { WasmCodeRefScope code_ref_scope; native_module->PublishCode( diff --git a/src/wasm/function-compiler.h b/src/wasm/function-compiler.h index a10927ecf2..5798982ad4 100644 --- a/src/wasm/function-compiler.h +++ b/src/wasm/function-compiler.h @@ -76,7 +76,7 @@ class V8_EXPORT_PRIVATE WasmCompilationUnit final { ForDebugging for_debugging() const { return for_debugging_; } int func_index() const { return func_index_; } - static void CompileWasmFunction(Counters*, NativeModule*, + static void CompileWasmFunction(Isolate*, NativeModule*, WasmFeatures* detected, const WasmFunction*, ExecutionTier); diff --git a/src/wasm/module-compiler.cc b/src/wasm/module-compiler.cc index 94856a3938..63468aee4b 100644 --- a/src/wasm/module-compiler.cc +++ b/src/wasm/module-compiler.cc @@ -631,8 +631,6 @@ class CompilationStateImpl { compile_job_->UpdatePriority(TaskPriority::kUserBlocking); } - void TierUpAllFunctions(); - bool failed() const { return compile_failed_.load(std::memory_order_relaxed); } @@ 
-838,10 +836,6 @@ void CompilationState::AddCallback( void CompilationState::SetHighPriority() { Impl(this)->SetHighPriority(); } -void CompilationState::TierUpAllFunctions() { - Impl(this)->TierUpAllFunctions(); -} - void CompilationState::InitializeAfterDeserialization( base::Vector lazy_functions, base::Vector eager_functions) { @@ -1416,8 +1410,7 @@ void TierUpNowForTesting(Isolate* isolate, WasmInstanceObject instance, TransitiveTypeFeedbackProcessor::Process(instance, func_index); } auto* native_module = instance.module_object().native_module(); - wasm::GetWasmEngine()->CompileFunction(isolate->counters(), native_module, - func_index, + wasm::GetWasmEngine()->CompileFunction(isolate, native_module, func_index, wasm::ExecutionTier::kTurbofan); CHECK(!native_module->compilation_state()->failed()); } @@ -3705,45 +3698,6 @@ void CompilationStateImpl::WaitForCompilationEvent( semaphore->Wait(); } -void CompilationStateImpl::TierUpAllFunctions() { - const WasmModule* module = native_module_->module(); - uint32_t num_wasm_functions = module->num_declared_functions; - WasmCodeRefScope code_ref_scope; - CompilationUnitBuilder builder(native_module_); - for (uint32_t i = 0; i < num_wasm_functions; ++i) { - int func_index = module->num_imported_functions + i; - WasmCode* code = native_module_->GetCode(func_index); - if (!code || !code->is_turbofan()) { - builder.AddTopTierUnit(func_index, ExecutionTier::kTurbofan); - } - } - builder.Commit(); - - // Join the compilation, until no compilation units are left anymore. 
- class DummyDelegate final : public JobDelegate { - bool ShouldYield() override { return false; } - bool IsJoiningThread() const override { return true; } - void NotifyConcurrencyIncrease() override { UNIMPLEMENTED(); } - uint8_t GetTaskId() override { return kMainTaskId; } - }; - - DummyDelegate delegate; - ExecuteCompilationUnits(native_module_weak_, async_counters_.get(), &delegate, - kBaselineOrTopTier); - - // We cannot wait for other compilation threads to finish, so we explicitly - // compile all functions which are not yet available as TurboFan code. - for (uint32_t i = 0; i < num_wasm_functions; ++i) { - uint32_t func_index = module->num_imported_functions + i; - WasmCode* code = native_module_->GetCode(func_index); - if (!code || !code->is_turbofan()) { - wasm::GetWasmEngine()->CompileFunction(async_counters_.get(), - native_module_, func_index, - wasm::ExecutionTier::kTurbofan); - } - } -} - namespace { using JSToWasmWrapperQueue = WrapperQueue>; diff --git a/src/wasm/wasm-engine.cc b/src/wasm/wasm-engine.cc index 6bfecc3e24..1d230ca1f7 100644 --- a/src/wasm/wasm-engine.cc +++ b/src/wasm/wasm-engine.cc @@ -705,13 +705,12 @@ std::shared_ptr WasmEngine::StartStreamingCompilation( isolate, enabled, context, api_method_name, std::move(resolver)); } -void WasmEngine::CompileFunction(Counters* counters, - NativeModule* native_module, +void WasmEngine::CompileFunction(Isolate* isolate, NativeModule* native_module, uint32_t function_index, ExecutionTier tier) { // Note we assume that "one-off" compilations can discard detected features. 
WasmFeatures detected = WasmFeatures::None(); WasmCompilationUnit::CompileWasmFunction( - counters, native_module, &detected, + isolate, native_module, &detected, &native_module->module()->functions[function_index], tier); } diff --git a/src/wasm/wasm-engine.h b/src/wasm/wasm-engine.h index 29a0fea4f3..709443d09b 100644 --- a/src/wasm/wasm-engine.h +++ b/src/wasm/wasm-engine.h @@ -202,7 +202,7 @@ class V8_EXPORT_PRIVATE WasmEngine { // Compiles the function with the given index at a specific compilation tier. // Errors are stored internally in the CompilationState. // This is mostly used for testing to force a function into a specific tier. - void CompileFunction(Counters* counters, NativeModule* native_module, + void CompileFunction(Isolate* isolate, NativeModule* native_module, uint32_t function_index, ExecutionTier tier); void EnterDebuggingForIsolate(Isolate* isolate); diff --git a/test/cctest/wasm/test-wasm-shared-engine.cc b/test/cctest/wasm/test-wasm-shared-engine.cc index 2591cf92d1..0c76e6d21f 100644 --- a/test/cctest/wasm/test-wasm-shared-engine.cc +++ b/test/cctest/wasm/test-wasm-shared-engine.cc @@ -301,7 +301,7 @@ TEST(SharedEngineRunThreadedTierUp) { Handle instance = isolate->ImportInstance(module); WasmFeatures detected = WasmFeatures::None(); WasmCompilationUnit::CompileWasmFunction( - isolate->isolate()->counters(), module.get(), &detected, + isolate->isolate(), module.get(), &detected, &module->module()->functions[0], ExecutionTier::kTurbofan); CHECK_EQ(23, isolate->Run(instance)); }); From 6ace5cfb51715def93300dc4e3f593a602f05cbe Mon Sep 17 00:00:00 2001 From: Clemens Backes Date: Mon, 2 Jan 2023 13:42:25 +0100 Subject: [PATCH 103/654] [platform] Check for failure of DiscardSystemPages The {madvise} call should typically not fail. There are only two errors specified (EINVAL and ENOMEM), both of which would only happen for invalid parameters. Thus add a CHECK that the {madvise} call does not fail. 
R=mlippautz@chromium.org Bug: chromium:1403519 Change-Id: Ib8c7ca9bbcab921b89305f1614319ecaddd79812 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4124534 Reviewed-by: Michael Lippautz Commit-Queue: Clemens Backes Cr-Commit-Position: refs/heads/main@{#85054} --- src/base/platform/platform-posix.cc | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/base/platform/platform-posix.cc b/src/base/platform/platform-posix.cc index d37b6219d4..5e356c521a 100644 --- a/src/base/platform/platform-posix.cc +++ b/src/base/platform/platform-posix.cc @@ -505,8 +505,9 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) { // TODO(erikchen): Fix this to only call MADV_FREE_REUSE when necessary. // https://crbug.com/823915 #if defined(V8_OS_DARWIN) - if (access != OS::MemoryPermission::kNoAccess) + if (access != OS::MemoryPermission::kNoAccess) { madvise(address, size, MADV_FREE_REUSE); + } #endif return ret == 0; @@ -554,14 +555,19 @@ bool OS::DiscardSystemPages(void* address, size_t size) { } #elif defined(_AIX) || defined(V8_OS_SOLARIS) int ret = madvise(reinterpret_cast(address), size, MADV_FREE); - if (ret != 0 && errno == ENOSYS) + if (ret != 0 && errno == ENOSYS) { return true; // madvise is not available on all systems. - if (ret != 0 && errno == EINVAL) + } + if (ret != 0 && errno == EINVAL) { ret = madvise(reinterpret_cast(address), size, MADV_DONTNEED); + } #else int ret = madvise(address, size, MADV_DONTNEED); #endif - return ret == 0; + // madvise with MADV_DONTNEED only fails on illegal parameters. That's a bug + // in the caller. 
+ CHECK_EQ(0, ret); + return true; } #if !defined(_AIX) From 0621eaff2c028057605b5d145e9a1d764125cab8 Mon Sep 17 00:00:00 2001 From: Leszek Swirski Date: Mon, 2 Jan 2023 13:41:22 +0000 Subject: [PATCH 104/654] Revert "Reland "Reland "[static-roots] Enable static roots on supported configurations""" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 4bbbb521f4267d0f8ec6edd07be595eed82dac9c. Reason for revert: Speculative revert for broken roll. Original change's description: > Reland "Reland "[static-roots] Enable static roots on supported configurations"" > > This is a reland of commit b247270178dcfffe9af4389dbb84d1643bfccea4 > > But with static roots disabled on non-external code space builds. > > > Original change's description: > > Reland "[static-roots] Enable static roots on supported configurations" > > > > This is a reland of commit c04ca9cc63417d24455704cbee44eb60b79f7af2 > > > > Original change's description: > > > [static-roots] Enable static roots on supported configurations > > > > > > The static root values are not actually used yet. 
> > > > > > Bug: v8:13466 > > > Change-Id: I85fc99277c31e0dd4350a305040ab25456051046 > > > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4101880 > > > Reviewed-by: Toon Verwaest > > > Commit-Queue: Olivier Flückiger > > > Cr-Commit-Position: refs/heads/main@{#84850} > > > > Bug: v8:13466 > > Change-Id: Id65bb5b19df999dfe930a78993e4bf3343d9f996 > > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4111641 > > Auto-Submit: Olivier Flückiger > > Reviewed-by: Toon Verwaest > > Commit-Queue: Toon Verwaest > > Cr-Commit-Position: refs/heads/main@{#84991} > > Bug: v8:13466 > Change-Id: Id1f55c1cf8d349338fd49f6cb0ed7dc2e1054a72 > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4123534 > Reviewed-by: Toon Verwaest > Commit-Queue: Olivier Flückiger > Cr-Commit-Position: refs/heads/main@{#85037} Bug: v8:13466 Change-Id: I54a9d68871e0ce2c0faeb49fd9947921073a6873 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128090 Bot-Commit: Rubber Stamper Auto-Submit: Leszek Swirski Owners-Override: Leszek Swirski Commit-Queue: Rubber Stamper Cr-Commit-Position: refs/heads/main@{#85055} --- BUILD.gn | 13 +- src/heap/read-only-spaces.cc | 4 - src/roots/static-roots.h | 753 +------------------------------ src/snapshot/static-roots-gen.cc | 3 - tools/v8heapconst.py | 474 +++++++++---------- 5 files changed, 243 insertions(+), 1004 deletions(-) diff --git a/BUILD.gn b/BUILD.gn index 0620dc16ab..26adeb7be9 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -121,7 +121,7 @@ declare_args() { v8_enable_snapshot_native_code_counters = "" # Use pre-generated static root pointer values from static-roots.h. - v8_enable_static_roots = "" + v8_enable_static_roots = false # Enable code-generation-time checking of types in the CodeStubAssembler. 
v8_enable_verify_csa = false @@ -540,16 +540,11 @@ if (v8_enable_sandbox == "") { if (v8_enable_static_roots == "") { # Static roots are only valid for builds with pointer compression and a - # shared read-only heap. - # TODO(olivf, v8:13466) Some configurations could be supported if we - # introduce different static root files for different build configurations: - # Non-wasm and non-i18n builds have fewer read only roots. Configurations - # without external code space allocate read only roots at a further - # location relative to the cage base. + # shared ro heap. Also, non-wasm and non-i18n builds have fewer read-only + # roots. v8_enable_static_roots = v8_enable_pointer_compression && v8_enable_shared_ro_heap && - v8_enable_pointer_compression_shared_cage && - v8_enable_external_code_space && v8_enable_webassembly && + v8_enable_pointer_compression_shared_cage && v8_enable_webassembly && v8_enable_i18n_support } diff --git a/src/heap/read-only-spaces.cc b/src/heap/read-only-spaces.cc index df277b3782..986fec59be 100644 --- a/src/heap/read-only-spaces.cc +++ b/src/heap/read-only-spaces.cc @@ -561,10 +561,6 @@ void ReadOnlySpace::FreeLinearAllocationArea() { void ReadOnlySpace::EnsurePage() { if (pages_.empty()) EnsureSpaceForAllocation(1); CHECK(!pages_.empty()); - // For all configurations where static roots are supported the read only roots - // are currently allocated in the first page of the cage. - CHECK_IMPLIES(V8_STATIC_ROOTS_BOOL, - heap_->isolate()->cage_base() == pages_.back()->address()); } void ReadOnlySpace::EnsureSpaceForAllocation(int size_in_bytes) { diff --git a/src/roots/static-roots.h b/src/roots/static-roots.h index eeced43597..eb4aebd879 100644 --- a/src/roots/static-roots.h +++ b/src/roots/static-roots.h @@ -2,766 +2,17 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -// This file is automatically generated by `tools/dev/gen-static-roots.py`. Do -// not edit manually. 
- #ifndef V8_ROOTS_STATIC_ROOTS_H_ #define V8_ROOTS_STATIC_ROOTS_H_ #include "src/common/globals.h" - #if V8_STATIC_ROOTS_BOOL -// Disabling Wasm or Intl invalidates the contents of static-roots.h. -// TODO(olivf): To support static roots for multiple build configurations we -// will need to generate target specific versions of this file. -static_assert(V8_ENABLE_WEBASSEMBLY); -static_assert(V8_INTL_SUPPORT); - namespace v8 { namespace internal { -constexpr static std::array StaticReadOnlyRootsPointerTable = { - 0x3235, // free_space_map - 0x38a5, // one_pointer_filler_map - 0x38cd, // two_pointer_filler_map - 0x7b19, // uninitialized_value - 0x22e1, // undefined_value - 0x22fd, // the_hole_value - 0x22c5, // null_value - 0x3f41, // true_value - 0x3f5d, // false_value - 0x543d, // empty_string - 0x2141, // meta_map - 0x31e5, // byte_array_map - 0x2169, // fixed_array_map - 0x21e1, // fixed_cow_array_map - 0x3995, // hash_table_map - 0x2b7d, // symbol_map - 0x2dad, // one_byte_string_map - 0x2f3d, // one_byte_internalized_string_map - 0x2ab5, // scope_info_map - 0x3d05, // shared_function_info_map - 0x3825, // code_map - 0x384d, // cell_map - 0x387d, // global_property_cell_map - 0x2ba5, // foreign_map - 0x2b55, // heap_number_map - 0x396d, // transition_array_map - 0x302d, // thin_one_byte_string_map - 0x2b2d, // feedback_vector_map - 0x3f19, // empty_scope_info - 0x22a9, // empty_fixed_array - 0x2aa5, // empty_descriptor_array - 0x7b51, // arguments_marker - 0x7bc9, // exception - 0x7b89, // termination_exception - 0x7be5, // optimized_out - 0x7c1d, // stale_register - 0x3bc5, // script_context_table_map - 0x2b05, // closure_feedback_cell_array_map - 0x31bd, // feedback_metadata_map - 0x3b9d, // array_list_map - 0x2d5d, // bigint_map - 0x3bed, // object_boilerplate_description_map - 0x320d, // bytecode_array_map - 0x3d7d, // code_data_container_map - 0x3c15, // coverage_info_map - 0x3195, // fixed_double_array_map - 0x3a85, // global_dictionary_map - 0x3945, // 
many_closures_cell_map - 0x2bcd, // mega_dom_handler_map - 0x2add, // module_info_map - 0x3a35, // name_dictionary_map - 0x38f5, // no_closures_cell_map - 0x3aad, // number_dictionary_map - 0x391d, // one_closure_cell_map - 0x39bd, // ordered_hash_map_map - 0x39e5, // ordered_hash_set_map - 0x3afd, // name_to_index_hash_table_map - 0x3b25, // registered_symbol_table_map - 0x3a0d, // ordered_name_dictionary_map - 0x3cdd, // preparse_data_map - 0x325d, // property_array_map - 0x3c3d, // accessor_info_map - 0x3c65, // side_effect_call_handler_info_map - 0x3c8d, // side_effect_free_call_handler_info_map - 0x3cb5, // next_call_side_effect_free_call_handler_info_map - 0x3ad5, // simple_number_dictionary_map - 0x3285, // small_ordered_hash_map_map - 0x32ad, // small_ordered_hash_set_map - 0x32d5, // small_ordered_name_dictionary_map - 0x3d2d, // source_text_module_map - 0x3a5d, // swiss_name_dictionary_map - 0x3d55, // synthetic_module_map - 0x3da5, // wasm_api_function_ref_map - 0x3dcd, // wasm_capi_function_data_map - 0x3df5, // wasm_exported_function_data_map - 0x3e1d, // wasm_internal_function_map - 0x3e45, // wasm_js_function_data_map - 0x3e6d, // wasm_resume_data_map - 0x3e95, // wasm_type_info_map - 0x3ebd, // wasm_continuation_object_map - 0x2191, // weak_fixed_array_map - 0x21b9, // weak_array_list_map - 0x3b75, // ephemeron_hash_table_map - 0x3b4d, // embedder_data_array_map - 0x3ee5, // weak_cell_map - 0x2d85, // string_map - 0x2dfd, // cons_one_byte_string_map - 0x2dd5, // cons_string_map - 0x3005, // thin_string_map - 0x2e25, // sliced_string_map - 0x2e4d, // sliced_one_byte_string_map - 0x2e75, // external_string_map - 0x2e9d, // external_one_byte_string_map - 0x2ec5, // uncached_external_string_map - 0x2f15, // internalized_string_map - 0x2f65, // external_internalized_string_map - 0x2f8d, // external_one_byte_internalized_string_map - 0x2fb5, // uncached_external_internalized_string_map - 0x2fdd, // uncached_external_one_byte_internalized_string_map - 
0x2eed, // uncached_external_one_byte_string_map - 0x307d, // shared_one_byte_string_map - 0x3055, // shared_string_map - 0x30cd, // shared_external_one_byte_string_map - 0x30a5, // shared_external_string_map - 0x311d, // shared_uncached_external_one_byte_string_map - 0x30f5, // shared_uncached_external_string_map - 0x316d, // shared_thin_one_byte_string_map - 0x3145, // shared_thin_string_map - 0x2231, // undefined_map - 0x2281, // the_hole_map - 0x2259, // null_map - 0x2bf5, // boolean_map - 0x2c1d, // uninitialized_map - 0x2c45, // arguments_marker_map - 0x2c6d, // exception_map - 0x2c95, // termination_exception_map - 0x2cbd, // optimized_out_map - 0x2ce5, // stale_register_map - 0x2d0d, // self_reference_marker_map - 0x2d35, // basic_block_counters_marker_map - 0x2a99, // empty_enum_cache - 0x3f81, // empty_property_array - 0x3f79, // empty_byte_array - 0x3f29, // empty_object_boilerplate_description - 0x3f35, // empty_array_boilerplate_description - 0x3f89, // empty_closure_feedback_cell_array - 0x820d, // empty_slow_element_dictionary - 0x8231, // empty_ordered_hash_map - 0x8245, // empty_ordered_hash_set - 0x829d, // empty_feedback_metadata - 0x81c9, // empty_property_dictionary - 0x8259, // empty_ordered_property_dictionary - 0x827d, // empty_swiss_property_dictionary - 0x3f91, // noop_interceptor_info - 0x3f0d, // empty_array_list - 0x22b1, // empty_weak_fixed_array - 0x22b9, // empty_weak_array_list - 0x3875, // invalid_prototype_validity_cell - 0x3fc5, // nan_value - 0x3fd1, // hole_nan_value - 0x3fdd, // infinity_value - 0x3fb9, // minus_zero_value - 0x3fe9, // minus_infinity_value - 0x3ff5, // max_safe_integer - 0x4001, // max_uint_32 - 0x400d, // smi_min_value - 0x4019, // smi_max_value_plus_one - 0x4035, // single_character_string_table - 0x7c55, // self_reference_marker - 0x7c95, // basic_block_counters_marker - 0x831d, // off_heap_trampoline_relocation_info - 0x22e1, // trampoline_trivial_code_data_container - 0x22e1, // 
trampoline_promise_rejection_code_data_container - 0x82a9, // global_this_binding_scope_info - 0x82c9, // empty_function_scope_info - 0x82ed, // native_scope_info - 0x8305, // shadow_realm_scope_info - 0x81f1, // empty_symbol_table - 0x4025, // hash_seed - 0x5449, // adoptText_string - 0x5461, // approximatelySign_string - 0x5481, // baseName_string - 0x5495, // accounting_string - 0x54ad, // breakType_string - 0x54c5, // calendars_string - 0x54dd, // cardinal_string - 0x54f1, // caseFirst_string - 0x5509, // ceil_string - 0x5519, // compare_string - 0x552d, // collation_string - 0x5545, // collations_string - 0x555d, // compact_string - 0x5571, // compactDisplay_string - 0x558d, // currency_string - 0x55a1, // currencyDisplay_string - 0x55bd, // currencySign_string - 0x55d5, // dateStyle_string - 0x55ed, // dateTimeField_string - 0x5609, // dayPeriod_string - 0x5621, // daysDisplay_string - 0x5639, // decimal_string - 0x564d, // dialect_string - 0x5661, // digital_string - 0x5675, // direction_string - 0x568d, // endRange_string - 0x56a1, // engineering_string - 0x56b9, // exceptZero_string - 0x56d1, // expand_string - 0x56e5, // exponentInteger_string - 0x5701, // exponentMinusSign_string - 0x5721, // exponentSeparator_string - 0x5741, // fallback_string - 0x5755, // first_string - 0x5769, // firstDay_string - 0x577d, // floor_string - 0x5791, // format_string - 0x57a5, // fraction_string - 0x57b9, // fractionalDigits_string - 0x57d5, // fractionalSecond_string - 0x57f1, // full_string - 0x5801, // granularity_string - 0x5819, // grapheme_string - 0x582d, // group_string - 0x5841, // h11_string - 0x5851, // h12_string - 0x5861, // h23_string - 0x5871, // h24_string - 0x5881, // halfCeil_string - 0x5895, // halfEven_string - 0x58a9, // halfExpand_string - 0x58c1, // halfFloor_string - 0x58d9, // halfTrunc_string - 0x58f1, // hour12_string - 0x5905, // hourCycle_string - 0x591d, // hourCycles_string - 0x5935, // hoursDisplay_string - 0x594d, // ideo_string - 
0x595d, // ignorePunctuation_string - 0x597d, // Invalid_Date_string - 0x5995, // integer_string - 0x59a9, // isWordLike_string - 0x59c1, // kana_string - 0x59d1, // language_string - 0x59e5, // languageDisplay_string - 0x5a01, // lessPrecision_string - 0x5a1d, // letter_string - 0x5a31, // list_string - 0x5a41, // literal_string - 0x5a55, // locale_string - 0x5a69, // loose_string - 0x5a7d, // lower_string - 0x5a91, // ltr_string - 0x5aa1, // maximumFractionDigits_string - 0x5ac5, // maximumSignificantDigits_string - 0x5ae9, // microsecondsDisplay_string - 0x5b09, // millisecondsDisplay_string - 0x5b29, // min2_string - 0x5b39, // minimalDays_string - 0x5b51, // minimumFractionDigits_string - 0x5b75, // minimumIntegerDigits_string - 0x5b95, // minimumSignificantDigits_string - 0x5bb9, // minus_0 - 0x5bc9, // minusSign_string - 0x5be1, // minutesDisplay_string - 0x5bfd, // monthsDisplay_string - 0x5c19, // morePrecision_string - 0x5c35, // nan_string - 0x5c45, // nanosecondsDisplay_string - 0x5c65, // narrowSymbol_string - 0x5c7d, // negative_string - 0x5c91, // never_string - 0x5ca5, // none_string - 0x5cb5, // notation_string - 0x5cc9, // normal_string - 0x5cdd, // numberingSystem_string - 0x5cf9, // numberingSystems_string - 0x5d15, // numeric_string - 0x5d29, // ordinal_string - 0x5d3d, // percentSign_string - 0x5d55, // plusSign_string - 0x5d69, // quarter_string - 0x5d7d, // region_string - 0x5d91, // relatedYear_string - 0x5da9, // roundingMode_string - 0x5dc1, // roundingPriority_string - 0x5ddd, // rtl_string - 0x5ded, // scientific_string - 0x5e05, // secondsDisplay_string - 0x5e21, // segment_string - 0x5e35, // SegmentIterator_string - 0x5e51, // Segments_string - 0x5e65, // sensitivity_string - 0x5e7d, // sep_string - 0x5e8d, // shared_string - 0x5ea1, // signDisplay_string - 0x5eb9, // standard_string - 0x5ecd, // startRange_string - 0x5ee5, // strict_string - 0x5ef9, // stripIfInteger_string - 0x5f15, // style_string - 0x5f29, // term_string - 
0x5f39, // textInfo_string - 0x5f4d, // timeStyle_string - 0x5f65, // timeZones_string - 0x5f7d, // timeZoneName_string - 0x5f95, // trailingZeroDisplay_string - 0x5fb5, // trunc_string - 0x5fc9, // two_digit_string - 0x5fdd, // type_string - 0x5fed, // unknown_string - 0x6001, // upper_string - 0x6015, // usage_string - 0x6029, // useGrouping_string - 0x6041, // unitDisplay_string - 0x6059, // weekday_string - 0x606d, // weekend_string - 0x6081, // weeksDisplay_string - 0x6099, // weekInfo_string - 0x60ad, // yearName_string - 0x60c1, // yearsDisplay_string - 0x60d9, // add_string - 0x60e9, // AggregateError_string - 0x6105, // always_string - 0x6119, // anonymous_function_string - 0x6139, // anonymous_string - 0x6151, // apply_string - 0x6165, // Arguments_string - 0x617d, // arguments_string - 0x6195, // arguments_to_string - 0x61b5, // Array_string - 0x61c9, // array_to_string - 0x61e5, // ArrayBuffer_string - 0x61fd, // ArrayIterator_string - 0x6219, // as_string - 0x6229, // assert_string - 0x623d, // async_string - 0x6251, // AtomicsCondition_string - 0x6271, // AtomicsMutex_string - 0x628d, // auto_string - 0x629d, // await_string - 0x62b1, // BigInt_string - 0x62c5, // bigint_string - 0x62d9, // BigInt64Array_string - 0x62f5, // BigUint64Array_string - 0x6311, // bind_string - 0x6321, // blank_string - 0x6335, // Boolean_string - 0x6349, // boolean_string - 0x635d, // boolean_to_string - 0x6379, // bound__string - 0x638d, // buffer_string - 0x63a1, // byte_length_string - 0x63b9, // byte_offset_string - 0x63d1, // CompileError_string - 0x63e9, // calendar_string - 0x63fd, // callee_string - 0x6411, // caller_string - 0x6425, // cause_string - 0x6439, // character_string - 0x6451, // closure_string - 0x6469, // code_string - 0x6479, // column_string - 0x648d, // computed_string - 0x64a5, // configurable_string - 0x64bd, // conjunction_string - 0x64d5, // console_string - 0x64e9, // constrain_string - 0x6501, // construct_string - 0x6519, // current_string - 
0x652d, // Date_string - 0x653d, // date_to_string - 0x6559, // dateAdd_string - 0x656d, // dateFromFields_string - 0x6589, // dateUntil_string - 0x65a1, // day_string - 0x65b1, // dayOfWeek_string - 0x65c9, // dayOfYear_string - 0x65e1, // days_string - 0x65f1, // daysInMonth_string - 0x6609, // daysInWeek_string - 0x6621, // daysInYear_string - 0x6639, // default_string - 0x664d, // defineProperty_string - 0x6669, // deleteProperty_string - 0x6685, // disjunction_string - 0x669d, // done_string - 0x66ad, // dot_brand_string - 0x66c1, // dot_catch_string - 0x66d5, // dot_default_string - 0x66e9, // dot_for_string - 0x66f9, // dot_generator_object_string - 0x6719, // dot_home_object_string - 0x6731, // dot_new_target_string - 0x6749, // dot_result_string - 0x675d, // dot_repl_result_string - 0x6775, // dot_static_home_object_string - 0x471d, // dot_string - 0x6795, // dot_switch_tag_string - 0x67ad, // dotAll_string - 0x67c1, // Error_string - 0x67d5, // EvalError_string - 0x67ed, // enumerable_string - 0x6805, // element_string - 0x6819, // epochMicroseconds_string - 0x6839, // epochMilliseconds_string - 0x6859, // epochNanoseconds_string - 0x6875, // epochSeconds_string - 0x688d, // era_string - 0x689d, // eraYear_string - 0x68b1, // errors_string - 0x68c5, // error_to_string - 0x68e1, // eval_string - 0x68f1, // exception_string - 0x6909, // exec_string - 0x6919, // false_string - 0x692d, // fields_string - 0x6941, // FinalizationRegistry_string - 0x6961, // flags_string - 0x6975, // Float32Array_string - 0x698d, // Float64Array_string - 0x69a5, // fractionalSecondDigits_string - 0x69c9, // from_string - 0x69d9, // Function_string - 0x69ed, // function_native_code_string - 0x6a19, // function_string - 0x6a2d, // function_to_string - 0x6a4d, // Generator_string - 0x6a65, // get_space_string - 0x6a75, // get_string - 0x6a85, // getOffsetNanosecondsFor_string - 0x6aa9, // getOwnPropertyDescriptor_string - 0x6acd, // getPossibleInstantsFor_string - 0x6af1, // 
getPrototypeOf_string - 0x6b0d, // global_string - 0x6b21, // globalThis_string - 0x6b39, // groups_string - 0x6b4d, // growable_string - 0x6b61, // has_string - 0x6b71, // hasIndices_string - 0x6b89, // hour_string - 0x6b99, // hours_string - 0x6bad, // hoursInDay_string - 0x6bc5, // ignoreCase_string - 0x6bdd, // id_string - 0x6bed, // illegal_access_string - 0x6c09, // illegal_argument_string - 0x6c25, // inLeapYear_string - 0x6c3d, // index_string - 0x6c51, // indices_string - 0x6c65, // Infinity_string - 0x6c79, // infinity_string - 0x6c8d, // input_string - 0x6ca1, // Int16Array_string - 0x6cb9, // Int32Array_string - 0x6cd1, // Int8Array_string - 0x6ce9, // isExtensible_string - 0x6d01, // iso8601_string - 0x6d15, // isoDay_string - 0x6d29, // isoHour_string - 0x6d3d, // isoMicrosecond_string - 0x6d59, // isoMillisecond_string - 0x6d75, // isoMinute_string - 0x6d8d, // isoMonth_string - 0x6da1, // isoNanosecond_string - 0x6dbd, // isoSecond_string - 0x6dd5, // isoYear_string - 0x6de9, // jsMemoryEstimate_string - 0x6e05, // jsMemoryRange_string - 0x6e21, // keys_string - 0x6e31, // largestUnit_string - 0x6e49, // lastIndex_string - 0x6e61, // length_string - 0x6e75, // let_string - 0x6e85, // line_string - 0x6e95, // linear_string - 0x6ea9, // LinkError_string - 0x6ec1, // long_string - 0x6ed1, // Map_string - 0x6ee1, // MapIterator_string - 0x6ef9, // max_byte_length_string - 0x6f15, // medium_string - 0x6f29, // mergeFields_string - 0x6f41, // message_string - 0x6f55, // meta_string - 0x6f65, // minus_Infinity_string - 0x6f7d, // microsecond_string - 0x6f95, // microseconds_string - 0x6fad, // millisecond_string - 0x6fc5, // milliseconds_string - 0x6fdd, // minute_string - 0x6ff1, // minutes_string - 0x7005, // Module_string - 0x7019, // month_string - 0x702d, // monthDayFromFields_string - 0x704d, // months_string - 0x7061, // monthsInYear_string - 0x7079, // monthCode_string - 0x7091, // multiline_string - 0x70a9, // name_string - 0x70b9, // NaN_string - 
0x70c9, // nanosecond_string - 0x70e1, // nanoseconds_string - 0x70f9, // narrow_string - 0x710d, // native_string - 0x6731, // new_target_string - 0x7121, // NFC_string - 0x7131, // NFD_string - 0x7141, // NFKC_string - 0x7151, // NFKD_string - 0x7161, // not_equal_string - 0x7179, // null_string - 0x7189, // null_to_string - 0x71a5, // Number_string - 0x71b9, // number_string - 0x71cd, // number_to_string - 0x71e9, // Object_string - 0x71fd, // object_string - 0x7211, // object_to_string - 0x722d, // of_string - 0x723d, // offset_string - 0x7251, // offsetNanoseconds_string - 0x7271, // ok_string - 0x474d, // one_string - 0x7281, // other_string - 0x7295, // overflow_string - 0x72a9, // ownKeys_string - 0x72bd, // percent_string - 0x72d1, // plainDate_string - 0x72e9, // plainTime_string - 0x7301, // position_string - 0x7315, // preventExtensions_string - 0x7335, // private_constructor_string - 0x734d, // Promise_string - 0x7361, // proto_string - 0x7379, // prototype_string - 0x7391, // proxy_string - 0x73a5, // Proxy_string - 0x73b9, // query_colon_string - 0x73c9, // RangeError_string - 0x73e1, // raw_json_string - 0x73f5, // raw_string - 0x7405, // ReferenceError_string - 0x7421, // ReflectGet_string - 0x7439, // ReflectHas_string - 0x7451, // RegExp_string - 0x7465, // regexp_to_string - 0x7481, // reject_string - 0x7495, // relativeTo_string - 0x74ad, // resizable_string - 0x74c5, // ResizableArrayBuffer_string - 0x74e5, // return_string - 0x74f9, // revoke_string - 0x750d, // roundingIncrement_string - 0x752d, // RuntimeError_string - 0x7545, // WebAssemblyException_string - 0x7569, // Script_string - 0x757d, // script_string - 0x7591, // second_string - 0x75a5, // seconds_string - 0x75b9, // short_string - 0x75cd, // Set_string - 0x75dd, // sentence_string - 0x75f1, // set_space_string - 0x7601, // set_string - 0x7611, // SetIterator_string - 0x7629, // setPrototypeOf_string - 0x7645, // ShadowRealm_string - 0x765d, // SharedArray_string - 0x7675, // 
SharedArrayBuffer_string - 0x7695, // SharedStruct_string - 0x76ad, // sign_string - 0x76bd, // smallestUnit_string - 0x76d5, // source_string - 0x76e9, // sourceText_string - 0x7701, // stack_string - 0x7715, // stackTraceLimit_string - 0x7731, // sticky_string - 0x7745, // String_string - 0x7759, // string_string - 0x776d, // string_to_string - 0x7789, // Symbol_iterator_string - 0x77a5, // symbol_species_string - 0x77c1, // Symbol_species_string - 0x77dd, // Symbol_string - 0x77f1, // symbol_string - 0x7805, // SyntaxError_string - 0x781d, // target_string - 0x7831, // this_function_string - 0x784d, // this_string - 0x785d, // throw_string - 0x7871, // timed_out_string - 0x7889, // timeZone_string - 0x789d, // toJSON_string - 0x78b1, // toString_string - 0x78c5, // true_string - 0x78d5, // total_string - 0x78e9, // TypeError_string - 0x7901, // Uint16Array_string - 0x7919, // Uint32Array_string - 0x7931, // Uint8Array_string - 0x7949, // Uint8ClampedArray_string - 0x7969, // undefined_string - 0x7981, // undefined_to_string - 0x79a1, // unicode_string - 0x79b5, // unicodeSets_string - 0x79cd, // unit_string - 0x79dd, // URIError_string - 0x79f1, // UTC_string - 0x7a01, // value_string - 0x7a15, // valueOf_string - 0x7a29, // WeakMap_string - 0x7a3d, // WeakRef_string - 0x7a51, // WeakSet_string - 0x7a65, // week_string - 0x7a75, // weeks_string - 0x7a89, // weekOfYear_string - 0x7aa1, // word_string - 0x7ab1, // writable_string - 0x7ac5, // yearMonthFromFields_string - 0x7ae5, // year_string - 0x7af5, // years_string - 0x473d, // zero_string - 0x7cd9, // array_buffer_wasm_memory_symbol - 0x7ce9, // call_site_info_symbol - 0x7cf9, // console_context_id_symbol - 0x7d09, // console_context_name_symbol - 0x7d19, // class_fields_symbol - 0x7d29, // class_positions_symbol - 0x7d39, // elements_transition_symbol - 0x7d49, // error_end_pos_symbol - 0x7d59, // error_script_symbol - 0x7d69, // error_stack_symbol - 0x7d79, // error_start_pos_symbol - 0x7d89, // 
frozen_symbol - 0x7d99, // interpreter_trampoline_symbol - 0x7da9, // mega_dom_symbol - 0x7db9, // megamorphic_symbol - 0x7dc9, // native_context_index_symbol - 0x7dd9, // nonextensible_symbol - 0x7de9, // not_mapped_symbol - 0x7df9, // promise_debug_marker_symbol - 0x7e09, // promise_debug_message_symbol - 0x7e19, // promise_forwarding_handler_symbol - 0x7e29, // promise_handled_by_symbol - 0x7e39, // promise_awaited_by_symbol - 0x7e49, // regexp_result_names_symbol - 0x7e59, // regexp_result_regexp_input_symbol - 0x7e69, // regexp_result_regexp_last_index_symbol - 0x7e79, // sealed_symbol - 0x7e89, // strict_function_transition_symbol - 0x7e99, // template_literal_function_literal_id_symbol - 0x7ea9, // template_literal_slot_id_symbol - 0x7eb9, // wasm_exception_tag_symbol - 0x7ec9, // wasm_exception_values_symbol - 0x7ed9, // wasm_uncatchable_symbol - 0x7ee9, // wasm_wrapped_object_symbol - 0x7ef9, // wasm_debug_proxy_cache_symbol - 0x7f09, // wasm_debug_proxy_names_symbol - 0x7f19, // uninitialized_symbol - 0x7f29, // async_iterator_symbol - 0x7f59, // intl_fallback_symbol - 0x7f91, // match_all_symbol - 0x7fbd, // match_symbol - 0x7fe5, // replace_symbol - 0x8011, // search_symbol - 0x803d, // split_symbol - 0x8065, // to_primitive_symbol - 0x8095, // unscopables_symbol - 0x80c5, // has_instance_symbol - 0x80f5, // to_string_tag_symbol - 0x2319, // promise_fulfill_reaction_job_task_map - 0x2341, // promise_reject_reaction_job_task_map - 0x2369, // callable_task_map - 0x2391, // callback_task_map - 0x23b9, // promise_resolve_thenable_job_task_map - 0x23e1, // function_template_info_map - 0x2409, // object_template_info_map - 0x2431, // access_check_info_map - 0x2459, // accessor_pair_map - 0x2481, // aliased_arguments_entry_map - 0x24a9, // allocation_memento_map - 0x24d1, // array_boilerplate_description_map - 0x24f9, // asm_wasm_data_map - 0x2521, // async_generator_request_map - 0x2549, // break_point_map - 0x2571, // break_point_info_map - 0x2599, // 
call_site_info_map - 0x25c1, // class_positions_map - 0x25e9, // debug_info_map - 0x2611, // enum_cache_map - 0x2639, // error_stack_data_map - 0x2661, // function_template_rare_data_map - 0x2689, // interceptor_info_map - 0x26b1, // interpreter_data_map - 0x26d9, // module_request_map - 0x2701, // promise_capability_map - 0x2729, // promise_on_stack_map - 0x2751, // promise_reaction_map - 0x2779, // property_descriptor_object_map - 0x27a1, // prototype_info_map - 0x27c9, // regexp_boilerplate_description_map - 0x27f1, // script_map - 0x2819, // script_or_module_map - 0x2841, // module_info_entry_map - 0x2869, // stack_frame_info_map - 0x2891, // template_object_description_map - 0x28b9, // tuple2_map - 0x28e1, // wasm_exception_tag_map - 0x2909, // wasm_indirect_function_table_map - 0x370d, // sloppy_arguments_elements_map - 0x2209, // descriptor_array_map - 0x3735, // strong_descriptor_array_map - 0x32fd, // uncompiled_data_without_preparse_data_map - 0x3325, // uncompiled_data_with_preparse_data_map - 0x334d, // uncompiled_data_without_preparse_data_with_job_map - 0x3375, // uncompiled_data_with_preparse_data_and_job_map - 0x339d, // on_heap_basic_block_profiler_data_map - 0x33c5, // turbofan_bitset_type_map - 0x33ed, // turbofan_union_type_map - 0x3415, // turbofan_range_type_map - 0x343d, // turbofan_heap_constant_type_map - 0x3465, // turbofan_other_number_constant_type_map - 0x348d, // turboshaft_word32type_map - 0x34b5, // turboshaft_word32range_type_map - 0x375d, // turboshaft_word32set_type_map - 0x34dd, // turboshaft_word64type_map - 0x3505, // turboshaft_word64range_type_map - 0x3785, // turboshaft_word64set_type_map - 0x352d, // turboshaft_float64type_map - 0x3555, // turboshaft_float64range_type_map - 0x37ad, // turboshaft_float64set_type_map - 0x357d, // internal_class_map - 0x35a5, // smi_pair_map - 0x35cd, // smi_box_map - 0x35f5, // exported_sub_class_base_map - 0x361d, // exported_sub_class_map - 0x3645, // abstract_internal_class_subclass1_map - 
0x366d, // abstract_internal_class_subclass2_map - 0x37d5, // internal_class_with_smi_elements_map - 0x37fd, // internal_class_with_struct_elements_map - 0x3695, // exported_sub_class2_map - 0x36bd, // sort_state_map - 0x36e5, // wasm_string_view_iter_map - 0x2931, // allocation_site_map - 0x2959, // allocation_site_without_weaknext_map - 0x814d, // constructor_string - 0x8165, // next_string - 0x8175, // resolve_string - 0x8189, // then_string - 0x8199, // iterator_symbol - 0x81a9, // species_symbol - 0x81b9, // is_concat_spreadable_symbol - 0x2981, // load_handler1_map - 0x29a9, // load_handler2_map - 0x29d1, // load_handler3_map - 0x29f9, // store_handler0_map - 0x2a21, // store_handler1_map - 0x2a49, // store_handler2_map - 0x2a71, // store_handler3_map -}; +// TODO(olivf, v8:13466): Enable and add static roots +constexpr static std::array StaticReadOnlyRootsPointerTable = {}; } // namespace internal } // namespace v8 diff --git a/src/snapshot/static-roots-gen.cc b/src/snapshot/static-roots-gen.cc index ae0068d29b..79036ea89c 100644 --- a/src/snapshot/static-roots-gen.cc +++ b/src/snapshot/static-roots-gen.cc @@ -30,9 +30,6 @@ void StaticRootsTableGen::write(Isolate* isolate, const char* file) { "that can be\n" << "// found in the LICENSE file.\n" << "\n" - << "// This file is automatically generated by " - "`tools/dev/gen-static-roots.py`. Do\n// not edit manually.\n" - << "\n" << "#ifndef V8_ROOTS_STATIC_ROOTS_H_\n" << "#define V8_ROOTS_STATIC_ROOTS_H_\n" << "\n" diff --git a/tools/v8heapconst.py b/tools/v8heapconst.py index 47ad87265c..ee486d1cc5 100644 --- a/tools/v8heapconst.py +++ b/tools/v8heapconst.py @@ -295,250 +295,250 @@ INSTANCE_TYPES = { # List of known V8 maps. 
KNOWN_MAPS = { ("read_only_space", 0x02141): (255, "MetaMap"), - ("read_only_space", 0x02169): (175, "FixedArrayMap"), - ("read_only_space", 0x02191): (240, "WeakFixedArrayMap"), + ("read_only_space", 0x02169): (131, "NullMap"), + ("read_only_space", 0x02191): (237, "StrongDescriptorArrayMap"), ("read_only_space", 0x021b9): (273, "WeakArrayListMap"), - ("read_only_space", 0x021e1): (175, "FixedCOWArrayMap"), - ("read_only_space", 0x02209): (236, "DescriptorArrayMap"), - ("read_only_space", 0x02231): (131, "UndefinedMap"), - ("read_only_space", 0x02259): (131, "NullMap"), - ("read_only_space", 0x02281): (131, "TheHoleMap"), - ("read_only_space", 0x02319): (132, "PromiseFulfillReactionJobTaskMap"), - ("read_only_space", 0x02341): (133, "PromiseRejectReactionJobTaskMap"), - ("read_only_space", 0x02369): (134, "CallableTaskMap"), - ("read_only_space", 0x02391): (135, "CallbackTaskMap"), - ("read_only_space", 0x023b9): (136, "PromiseResolveThenableJobTaskMap"), - ("read_only_space", 0x023e1): (139, "FunctionTemplateInfoMap"), - ("read_only_space", 0x02409): (140, "ObjectTemplateInfoMap"), - ("read_only_space", 0x02431): (141, "AccessCheckInfoMap"), - ("read_only_space", 0x02459): (142, "AccessorPairMap"), - ("read_only_space", 0x02481): (143, "AliasedArgumentsEntryMap"), - ("read_only_space", 0x024a9): (144, "AllocationMementoMap"), - ("read_only_space", 0x024d1): (146, "ArrayBoilerplateDescriptionMap"), - ("read_only_space", 0x024f9): (147, "AsmWasmDataMap"), - ("read_only_space", 0x02521): (148, "AsyncGeneratorRequestMap"), - ("read_only_space", 0x02549): (149, "BreakPointMap"), - ("read_only_space", 0x02571): (150, "BreakPointInfoMap"), - ("read_only_space", 0x02599): (151, "CallSiteInfoMap"), - ("read_only_space", 0x025c1): (152, "ClassPositionsMap"), - ("read_only_space", 0x025e9): (153, "DebugInfoMap"), - ("read_only_space", 0x02611): (154, "EnumCacheMap"), - ("read_only_space", 0x02639): (155, "ErrorStackDataMap"), - ("read_only_space", 0x02661): (157, 
"FunctionTemplateRareDataMap"), - ("read_only_space", 0x02689): (158, "InterceptorInfoMap"), - ("read_only_space", 0x026b1): (159, "InterpreterDataMap"), - ("read_only_space", 0x026d9): (160, "ModuleRequestMap"), - ("read_only_space", 0x02701): (161, "PromiseCapabilityMap"), - ("read_only_space", 0x02729): (162, "PromiseOnStackMap"), - ("read_only_space", 0x02751): (163, "PromiseReactionMap"), - ("read_only_space", 0x02779): (164, "PropertyDescriptorObjectMap"), - ("read_only_space", 0x027a1): (165, "PrototypeInfoMap"), - ("read_only_space", 0x027c9): (166, "RegExpBoilerplateDescriptionMap"), - ("read_only_space", 0x027f1): (167, "ScriptMap"), - ("read_only_space", 0x02819): (168, "ScriptOrModuleMap"), - ("read_only_space", 0x02841): (169, "SourceTextModuleInfoEntryMap"), - ("read_only_space", 0x02869): (170, "StackFrameInfoMap"), - ("read_only_space", 0x02891): (171, "TemplateObjectDescriptionMap"), - ("read_only_space", 0x028b9): (172, "Tuple2Map"), - ("read_only_space", 0x028e1): (173, "WasmExceptionTagMap"), - ("read_only_space", 0x02909): (174, "WasmIndirectFunctionTableMap"), - ("read_only_space", 0x02931): (145, "AllocationSiteWithWeakNextMap"), - ("read_only_space", 0x02959): (145, "AllocationSiteWithoutWeakNextMap"), - ("read_only_space", 0x02981): (137, "LoadHandler1Map"), - ("read_only_space", 0x029a9): (137, "LoadHandler2Map"), - ("read_only_space", 0x029d1): (137, "LoadHandler3Map"), - ("read_only_space", 0x029f9): (138, "StoreHandler0Map"), - ("read_only_space", 0x02a21): (138, "StoreHandler1Map"), - ("read_only_space", 0x02a49): (138, "StoreHandler2Map"), - ("read_only_space", 0x02a71): (138, "StoreHandler3Map"), - ("read_only_space", 0x02ab5): (261, "ScopeInfoMap"), - ("read_only_space", 0x02add): (175, "ModuleInfoMap"), - ("read_only_space", 0x02b05): (187, "ClosureFeedbackCellArrayMap"), - ("read_only_space", 0x02b2d): (250, "FeedbackVectorMap"), - ("read_only_space", 0x02b55): (130, "HeapNumberMap"), - ("read_only_space", 0x02b7d): (128, 
"SymbolMap"), - ("read_only_space", 0x02ba5): (204, "ForeignMap"), - ("read_only_space", 0x02bcd): (256, "MegaDomHandlerMap"), - ("read_only_space", 0x02bf5): (131, "BooleanMap"), - ("read_only_space", 0x02c1d): (131, "UninitializedMap"), - ("read_only_space", 0x02c45): (131, "ArgumentsMarkerMap"), - ("read_only_space", 0x02c6d): (131, "ExceptionMap"), - ("read_only_space", 0x02c95): (131, "TerminationExceptionMap"), - ("read_only_space", 0x02cbd): (131, "OptimizedOutMap"), - ("read_only_space", 0x02ce5): (131, "StaleRegisterMap"), - ("read_only_space", 0x02d0d): (131, "SelfReferenceMarkerMap"), - ("read_only_space", 0x02d35): (131, "BasicBlockCountersMarkerMap"), - ("read_only_space", 0x02d5d): (129, "BigIntMap"), - ("read_only_space", 0x02d85): (32, "StringMap"), - ("read_only_space", 0x02dad): (40, "OneByteStringMap"), - ("read_only_space", 0x02dd5): (33, "ConsStringMap"), - ("read_only_space", 0x02dfd): (41, "ConsOneByteStringMap"), - ("read_only_space", 0x02e25): (35, "SlicedStringMap"), - ("read_only_space", 0x02e4d): (43, "SlicedOneByteStringMap"), - ("read_only_space", 0x02e75): (34, "ExternalStringMap"), - ("read_only_space", 0x02e9d): (42, "ExternalOneByteStringMap"), - ("read_only_space", 0x02ec5): (50, "UncachedExternalStringMap"), - ("read_only_space", 0x02eed): (58, "UncachedExternalOneByteStringMap"), - ("read_only_space", 0x02f15): (0, "InternalizedStringMap"), - ("read_only_space", 0x02f3d): (8, "OneByteInternalizedStringMap"), - ("read_only_space", 0x02f65): (2, "ExternalInternalizedStringMap"), - ("read_only_space", 0x02f8d): (10, "ExternalOneByteInternalizedStringMap"), - ("read_only_space", 0x02fb5): (18, "UncachedExternalInternalizedStringMap"), - ("read_only_space", 0x02fdd): (26, "UncachedExternalOneByteInternalizedStringMap"), - ("read_only_space", 0x03005): (37, "ThinStringMap"), - ("read_only_space", 0x0302d): (45, "ThinOneByteStringMap"), - ("read_only_space", 0x03055): (96, "SharedStringMap"), - ("read_only_space", 0x0307d): (104, 
"SharedOneByteStringMap"), - ("read_only_space", 0x030a5): (98, "SharedExternalStringMap"), - ("read_only_space", 0x030cd): (106, "SharedExternalOneByteStringMap"), - ("read_only_space", 0x030f5): (114, "SharedUncachedExternalStringMap"), - ("read_only_space", 0x0311d): (122, "SharedUncachedExternalOneByteStringMap"), - ("read_only_space", 0x03145): (101, "SharedThinStringMap"), - ("read_only_space", 0x0316d): (109, "SharedThinOneByteStringMap"), - ("read_only_space", 0x03195): (192, "FixedDoubleArrayMap"), - ("read_only_space", 0x031bd): (249, "FeedbackMetadataArrayMap"), - ("read_only_space", 0x031e5): (190, "ByteArrayMap"), - ("read_only_space", 0x0320d): (191, "BytecodeArrayMap"), - ("read_only_space", 0x03235): (252, "FreeSpaceMap"), - ("read_only_space", 0x0325d): (259, "PropertyArrayMap"), - ("read_only_space", 0x03285): (231, "SmallOrderedHashMapMap"), - ("read_only_space", 0x032ad): (232, "SmallOrderedHashSetMap"), - ("read_only_space", 0x032d5): (233, "SmallOrderedNameDictionaryMap"), - ("read_only_space", 0x032fd): (222, "UncompiledDataWithoutPreparseDataMap"), - ("read_only_space", 0x03325): (220, "UncompiledDataWithPreparseDataMap"), - ("read_only_space", 0x0334d): (223, "UncompiledDataWithoutPreparseDataWithJobMap"), - ("read_only_space", 0x03375): (221, "UncompiledDataWithPreparseDataAndJobMap"), - ("read_only_space", 0x0339d): (257, "OnHeapBasicBlockProfilerDataMap"), - ("read_only_space", 0x033c5): (215, "TurbofanBitsetTypeMap"), - ("read_only_space", 0x033ed): (219, "TurbofanUnionTypeMap"), - ("read_only_space", 0x03415): (218, "TurbofanRangeTypeMap"), - ("read_only_space", 0x0343d): (216, "TurbofanHeapConstantTypeMap"), - ("read_only_space", 0x03465): (217, "TurbofanOtherNumberConstantTypeMap"), - ("read_only_space", 0x0348d): (198, "TurboshaftWord32TypeMap"), - ("read_only_space", 0x034b5): (199, "TurboshaftWord32RangeTypeMap"), - ("read_only_space", 0x034dd): (201, "TurboshaftWord64TypeMap"), - ("read_only_space", 0x03505): (202, 
"TurboshaftWord64RangeTypeMap"), - ("read_only_space", 0x0352d): (195, "TurboshaftFloat64TypeMap"), - ("read_only_space", 0x03555): (196, "TurboshaftFloat64RangeTypeMap"), - ("read_only_space", 0x0357d): (253, "InternalClassMap"), - ("read_only_space", 0x035a5): (264, "SmiPairMap"), - ("read_only_space", 0x035cd): (263, "SmiBoxMap"), - ("read_only_space", 0x035f5): (228, "ExportedSubClassBaseMap"), - ("read_only_space", 0x0361d): (229, "ExportedSubClassMap"), - ("read_only_space", 0x03645): (234, "AbstractInternalClassSubclass1Map"), - ("read_only_space", 0x0366d): (235, "AbstractInternalClassSubclass2Map"), - ("read_only_space", 0x03695): (230, "ExportedSubClass2Map"), - ("read_only_space", 0x036bd): (265, "SortStateMap"), - ("read_only_space", 0x036e5): (271, "WasmStringViewIterMap"), - ("read_only_space", 0x0370d): (194, "SloppyArgumentsElementsMap"), - ("read_only_space", 0x03735): (237, "StrongDescriptorArrayMap"), - ("read_only_space", 0x0375d): (200, "TurboshaftWord32SetTypeMap"), - ("read_only_space", 0x03785): (203, "TurboshaftWord64SetTypeMap"), - ("read_only_space", 0x037ad): (197, "TurboshaftFloat64SetTypeMap"), - ("read_only_space", 0x037d5): (193, "InternalClassWithSmiElementsMap"), - ("read_only_space", 0x037fd): (254, "InternalClassWithStructElementsMap"), - ("read_only_space", 0x03825): (245, "CodeMap"), - ("read_only_space", 0x0384d): (244, "CellMap"), - ("read_only_space", 0x0387d): (260, "GlobalPropertyCellMap"), - ("read_only_space", 0x038a5): (251, "OnePointerFillerMap"), - ("read_only_space", 0x038cd): (251, "TwoPointerFillerMap"), - ("read_only_space", 0x038f5): (156, "NoClosuresCellMap"), - ("read_only_space", 0x0391d): (156, "OneClosureCellMap"), - ("read_only_space", 0x03945): (156, "ManyClosuresCellMap"), - ("read_only_space", 0x0396d): (241, "TransitionArrayMap"), - ("read_only_space", 0x03995): (176, "HashTableMap"), - ("read_only_space", 0x039bd): (182, "OrderedHashMapMap"), - ("read_only_space", 0x039e5): (183, "OrderedHashSetMap"), 
- ("read_only_space", 0x03a0d): (184, "OrderedNameDictionaryMap"), - ("read_only_space", 0x03a35): (179, "NameDictionaryMap"), - ("read_only_space", 0x03a5d): (266, "SwissNameDictionaryMap"), - ("read_only_space", 0x03a85): (178, "GlobalDictionaryMap"), - ("read_only_space", 0x03aad): (181, "NumberDictionaryMap"), - ("read_only_space", 0x03ad5): (186, "SimpleNumberDictionaryMap"), - ("read_only_space", 0x03afd): (180, "NameToIndexHashTableMap"), - ("read_only_space", 0x03b25): (185, "RegisteredSymbolTableMap"), - ("read_only_space", 0x03b4d): (248, "EmbedderDataArrayMap"), - ("read_only_space", 0x03b75): (177, "EphemeronHashTableMap"), - ("read_only_space", 0x03b9d): (175, "ArrayListMap"), - ("read_only_space", 0x03bc5): (189, "ScriptContextTableMap"), - ("read_only_space", 0x03bed): (188, "ObjectBoilerplateDescriptionMap"), - ("read_only_space", 0x03c15): (247, "CoverageInfoMap"), - ("read_only_space", 0x03c3d): (242, "AccessorInfoMap"), - ("read_only_space", 0x03c65): (243, "SideEffectCallHandlerInfoMap"), - ("read_only_space", 0x03c8d): (243, "SideEffectFreeCallHandlerInfoMap"), - ("read_only_space", 0x03cb5): (243, "NextCallSideEffectFreeCallHandlerInfoMap"), - ("read_only_space", 0x03cdd): (258, "PreparseDataMap"), - ("read_only_space", 0x03d05): (262, "SharedFunctionInfoMap"), - ("read_only_space", 0x03d2d): (238, "SourceTextModuleMap"), - ("read_only_space", 0x03d55): (239, "SyntheticModuleMap"), - ("read_only_space", 0x03d7d): (246, "CodeDataContainerMap"), - ("read_only_space", 0x03da5): (267, "WasmApiFunctionRefMap"), - ("read_only_space", 0x03dcd): (225, "WasmCapiFunctionDataMap"), - ("read_only_space", 0x03df5): (226, "WasmExportedFunctionDataMap"), - ("read_only_space", 0x03e1d): (269, "WasmInternalFunctionMap"), - ("read_only_space", 0x03e45): (227, "WasmJSFunctionDataMap"), - ("read_only_space", 0x03e6d): (270, "WasmResumeDataMap"), - ("read_only_space", 0x03e95): (272, "WasmTypeInfoMap"), - ("read_only_space", 0x03ebd): (268, 
"WasmContinuationObjectMap"), - ("read_only_space", 0x03ee5): (274, "WeakCellMap"), + ("read_only_space", 0x021fd): (154, "EnumCacheMap"), + ("read_only_space", 0x02231): (175, "FixedArrayMap"), + ("read_only_space", 0x0227d): (8, "OneByteInternalizedStringMap"), + ("read_only_space", 0x022c9): (252, "FreeSpaceMap"), + ("read_only_space", 0x022f1): (251, "OnePointerFillerMap"), + ("read_only_space", 0x02319): (251, "TwoPointerFillerMap"), + ("read_only_space", 0x02341): (131, "UninitializedMap"), + ("read_only_space", 0x023b9): (131, "UndefinedMap"), + ("read_only_space", 0x023fd): (130, "HeapNumberMap"), + ("read_only_space", 0x02431): (131, "TheHoleMap"), + ("read_only_space", 0x02491): (131, "BooleanMap"), + ("read_only_space", 0x02535): (190, "ByteArrayMap"), + ("read_only_space", 0x0255d): (175, "FixedCOWArrayMap"), + ("read_only_space", 0x02585): (176, "HashTableMap"), + ("read_only_space", 0x025ad): (128, "SymbolMap"), + ("read_only_space", 0x025d5): (40, "OneByteStringMap"), + ("read_only_space", 0x025fd): (261, "ScopeInfoMap"), + ("read_only_space", 0x02625): (262, "SharedFunctionInfoMap"), + ("read_only_space", 0x0264d): (245, "CodeMap"), + ("read_only_space", 0x02675): (244, "CellMap"), + ("read_only_space", 0x0269d): (260, "GlobalPropertyCellMap"), + ("read_only_space", 0x026c5): (204, "ForeignMap"), + ("read_only_space", 0x026ed): (241, "TransitionArrayMap"), + ("read_only_space", 0x02715): (45, "ThinOneByteStringMap"), + ("read_only_space", 0x0273d): (250, "FeedbackVectorMap"), + ("read_only_space", 0x02775): (131, "ArgumentsMarkerMap"), + ("read_only_space", 0x027d5): (131, "ExceptionMap"), + ("read_only_space", 0x02831): (131, "TerminationExceptionMap"), + ("read_only_space", 0x02899): (131, "OptimizedOutMap"), + ("read_only_space", 0x028f9): (131, "StaleRegisterMap"), + ("read_only_space", 0x02959): (189, "ScriptContextTableMap"), + ("read_only_space", 0x02981): (187, "ClosureFeedbackCellArrayMap"), + ("read_only_space", 0x029a9): (249, 
"FeedbackMetadataArrayMap"), + ("read_only_space", 0x029d1): (175, "ArrayListMap"), + ("read_only_space", 0x029f9): (129, "BigIntMap"), + ("read_only_space", 0x02a21): (188, "ObjectBoilerplateDescriptionMap"), + ("read_only_space", 0x02a49): (191, "BytecodeArrayMap"), + ("read_only_space", 0x02a71): (246, "CodeDataContainerMap"), + ("read_only_space", 0x02a99): (247, "CoverageInfoMap"), + ("read_only_space", 0x02ac1): (192, "FixedDoubleArrayMap"), + ("read_only_space", 0x02ae9): (178, "GlobalDictionaryMap"), + ("read_only_space", 0x02b11): (156, "ManyClosuresCellMap"), + ("read_only_space", 0x02b39): (256, "MegaDomHandlerMap"), + ("read_only_space", 0x02b61): (175, "ModuleInfoMap"), + ("read_only_space", 0x02b89): (179, "NameDictionaryMap"), + ("read_only_space", 0x02bb1): (156, "NoClosuresCellMap"), + ("read_only_space", 0x02bd9): (181, "NumberDictionaryMap"), + ("read_only_space", 0x02c01): (156, "OneClosureCellMap"), + ("read_only_space", 0x02c29): (182, "OrderedHashMapMap"), + ("read_only_space", 0x02c51): (183, "OrderedHashSetMap"), + ("read_only_space", 0x02c79): (180, "NameToIndexHashTableMap"), + ("read_only_space", 0x02ca1): (185, "RegisteredSymbolTableMap"), + ("read_only_space", 0x02cc9): (184, "OrderedNameDictionaryMap"), + ("read_only_space", 0x02cf1): (258, "PreparseDataMap"), + ("read_only_space", 0x02d19): (259, "PropertyArrayMap"), + ("read_only_space", 0x02d41): (242, "AccessorInfoMap"), + ("read_only_space", 0x02d69): (243, "SideEffectCallHandlerInfoMap"), + ("read_only_space", 0x02d91): (243, "SideEffectFreeCallHandlerInfoMap"), + ("read_only_space", 0x02db9): (243, "NextCallSideEffectFreeCallHandlerInfoMap"), + ("read_only_space", 0x02de1): (186, "SimpleNumberDictionaryMap"), + ("read_only_space", 0x02e09): (231, "SmallOrderedHashMapMap"), + ("read_only_space", 0x02e31): (232, "SmallOrderedHashSetMap"), + ("read_only_space", 0x02e59): (233, "SmallOrderedNameDictionaryMap"), + ("read_only_space", 0x02e81): (238, "SourceTextModuleMap"), + 
("read_only_space", 0x02ea9): (266, "SwissNameDictionaryMap"), + ("read_only_space", 0x02ed1): (239, "SyntheticModuleMap"), + ("read_only_space", 0x02ef9): (267, "WasmApiFunctionRefMap"), + ("read_only_space", 0x02f21): (225, "WasmCapiFunctionDataMap"), + ("read_only_space", 0x02f49): (226, "WasmExportedFunctionDataMap"), + ("read_only_space", 0x02f71): (269, "WasmInternalFunctionMap"), + ("read_only_space", 0x02f99): (227, "WasmJSFunctionDataMap"), + ("read_only_space", 0x02fc1): (270, "WasmResumeDataMap"), + ("read_only_space", 0x02fe9): (272, "WasmTypeInfoMap"), + ("read_only_space", 0x03011): (268, "WasmContinuationObjectMap"), + ("read_only_space", 0x03039): (240, "WeakFixedArrayMap"), + ("read_only_space", 0x03061): (177, "EphemeronHashTableMap"), + ("read_only_space", 0x03089): (248, "EmbedderDataArrayMap"), + ("read_only_space", 0x030b1): (274, "WeakCellMap"), + ("read_only_space", 0x030d9): (32, "StringMap"), + ("read_only_space", 0x03101): (41, "ConsOneByteStringMap"), + ("read_only_space", 0x03129): (33, "ConsStringMap"), + ("read_only_space", 0x03151): (37, "ThinStringMap"), + ("read_only_space", 0x03179): (35, "SlicedStringMap"), + ("read_only_space", 0x031a1): (43, "SlicedOneByteStringMap"), + ("read_only_space", 0x031c9): (34, "ExternalStringMap"), + ("read_only_space", 0x031f1): (42, "ExternalOneByteStringMap"), + ("read_only_space", 0x03219): (50, "UncachedExternalStringMap"), + ("read_only_space", 0x03241): (0, "InternalizedStringMap"), + ("read_only_space", 0x03269): (2, "ExternalInternalizedStringMap"), + ("read_only_space", 0x03291): (10, "ExternalOneByteInternalizedStringMap"), + ("read_only_space", 0x032b9): (18, "UncachedExternalInternalizedStringMap"), + ("read_only_space", 0x032e1): (26, "UncachedExternalOneByteInternalizedStringMap"), + ("read_only_space", 0x03309): (58, "UncachedExternalOneByteStringMap"), + ("read_only_space", 0x03331): (104, "SharedOneByteStringMap"), + ("read_only_space", 0x03359): (96, "SharedStringMap"), + 
("read_only_space", 0x03381): (106, "SharedExternalOneByteStringMap"), + ("read_only_space", 0x033a9): (98, "SharedExternalStringMap"), + ("read_only_space", 0x033d1): (122, "SharedUncachedExternalOneByteStringMap"), + ("read_only_space", 0x033f9): (114, "SharedUncachedExternalStringMap"), + ("read_only_space", 0x03421): (109, "SharedThinOneByteStringMap"), + ("read_only_space", 0x03449): (101, "SharedThinStringMap"), + ("read_only_space", 0x03471): (131, "SelfReferenceMarkerMap"), + ("read_only_space", 0x03499): (131, "BasicBlockCountersMarkerMap"), + ("read_only_space", 0x034dd): (146, "ArrayBoilerplateDescriptionMap"), + ("read_only_space", 0x035dd): (158, "InterceptorInfoMap"), + ("read_only_space", 0x07655): (132, "PromiseFulfillReactionJobTaskMap"), + ("read_only_space", 0x0767d): (133, "PromiseRejectReactionJobTaskMap"), + ("read_only_space", 0x076a5): (134, "CallableTaskMap"), + ("read_only_space", 0x076cd): (135, "CallbackTaskMap"), + ("read_only_space", 0x076f5): (136, "PromiseResolveThenableJobTaskMap"), + ("read_only_space", 0x0771d): (139, "FunctionTemplateInfoMap"), + ("read_only_space", 0x07745): (140, "ObjectTemplateInfoMap"), + ("read_only_space", 0x0776d): (141, "AccessCheckInfoMap"), + ("read_only_space", 0x07795): (142, "AccessorPairMap"), + ("read_only_space", 0x077bd): (143, "AliasedArgumentsEntryMap"), + ("read_only_space", 0x077e5): (144, "AllocationMementoMap"), + ("read_only_space", 0x0780d): (147, "AsmWasmDataMap"), + ("read_only_space", 0x07835): (148, "AsyncGeneratorRequestMap"), + ("read_only_space", 0x0785d): (149, "BreakPointMap"), + ("read_only_space", 0x07885): (150, "BreakPointInfoMap"), + ("read_only_space", 0x078ad): (151, "CallSiteInfoMap"), + ("read_only_space", 0x078d5): (152, "ClassPositionsMap"), + ("read_only_space", 0x078fd): (153, "DebugInfoMap"), + ("read_only_space", 0x07925): (155, "ErrorStackDataMap"), + ("read_only_space", 0x0794d): (157, "FunctionTemplateRareDataMap"), + ("read_only_space", 0x07975): (159, 
"InterpreterDataMap"), + ("read_only_space", 0x0799d): (160, "ModuleRequestMap"), + ("read_only_space", 0x079c5): (161, "PromiseCapabilityMap"), + ("read_only_space", 0x079ed): (162, "PromiseOnStackMap"), + ("read_only_space", 0x07a15): (163, "PromiseReactionMap"), + ("read_only_space", 0x07a3d): (164, "PropertyDescriptorObjectMap"), + ("read_only_space", 0x07a65): (165, "PrototypeInfoMap"), + ("read_only_space", 0x07a8d): (166, "RegExpBoilerplateDescriptionMap"), + ("read_only_space", 0x07ab5): (167, "ScriptMap"), + ("read_only_space", 0x07add): (168, "ScriptOrModuleMap"), + ("read_only_space", 0x07b05): (169, "SourceTextModuleInfoEntryMap"), + ("read_only_space", 0x07b2d): (170, "StackFrameInfoMap"), + ("read_only_space", 0x07b55): (171, "TemplateObjectDescriptionMap"), + ("read_only_space", 0x07b7d): (172, "Tuple2Map"), + ("read_only_space", 0x07ba5): (173, "WasmExceptionTagMap"), + ("read_only_space", 0x07bcd): (174, "WasmIndirectFunctionTableMap"), + ("read_only_space", 0x07bf5): (194, "SloppyArgumentsElementsMap"), + ("read_only_space", 0x07c1d): (236, "DescriptorArrayMap"), + ("read_only_space", 0x07c45): (222, "UncompiledDataWithoutPreparseDataMap"), + ("read_only_space", 0x07c6d): (220, "UncompiledDataWithPreparseDataMap"), + ("read_only_space", 0x07c95): (223, "UncompiledDataWithoutPreparseDataWithJobMap"), + ("read_only_space", 0x07cbd): (221, "UncompiledDataWithPreparseDataAndJobMap"), + ("read_only_space", 0x07ce5): (257, "OnHeapBasicBlockProfilerDataMap"), + ("read_only_space", 0x07d0d): (215, "TurbofanBitsetTypeMap"), + ("read_only_space", 0x07d35): (219, "TurbofanUnionTypeMap"), + ("read_only_space", 0x07d5d): (218, "TurbofanRangeTypeMap"), + ("read_only_space", 0x07d85): (216, "TurbofanHeapConstantTypeMap"), + ("read_only_space", 0x07dad): (217, "TurbofanOtherNumberConstantTypeMap"), + ("read_only_space", 0x07dd5): (198, "TurboshaftWord32TypeMap"), + ("read_only_space", 0x07dfd): (199, "TurboshaftWord32RangeTypeMap"), + ("read_only_space", 
0x07e25): (200, "TurboshaftWord32SetTypeMap"), + ("read_only_space", 0x07e4d): (201, "TurboshaftWord64TypeMap"), + ("read_only_space", 0x07e75): (202, "TurboshaftWord64RangeTypeMap"), + ("read_only_space", 0x07e9d): (203, "TurboshaftWord64SetTypeMap"), + ("read_only_space", 0x07ec5): (195, "TurboshaftFloat64TypeMap"), + ("read_only_space", 0x07eed): (196, "TurboshaftFloat64RangeTypeMap"), + ("read_only_space", 0x07f15): (197, "TurboshaftFloat64SetTypeMap"), + ("read_only_space", 0x07f3d): (253, "InternalClassMap"), + ("read_only_space", 0x07f65): (264, "SmiPairMap"), + ("read_only_space", 0x07f8d): (263, "SmiBoxMap"), + ("read_only_space", 0x07fb5): (228, "ExportedSubClassBaseMap"), + ("read_only_space", 0x07fdd): (229, "ExportedSubClassMap"), + ("read_only_space", 0x08005): (234, "AbstractInternalClassSubclass1Map"), + ("read_only_space", 0x0802d): (235, "AbstractInternalClassSubclass2Map"), + ("read_only_space", 0x08055): (193, "InternalClassWithSmiElementsMap"), + ("read_only_space", 0x0807d): (254, "InternalClassWithStructElementsMap"), + ("read_only_space", 0x080a5): (230, "ExportedSubClass2Map"), + ("read_only_space", 0x080cd): (265, "SortStateMap"), + ("read_only_space", 0x080f5): (271, "WasmStringViewIterMap"), + ("read_only_space", 0x0811d): (145, "AllocationSiteWithWeakNextMap"), + ("read_only_space", 0x08145): (145, "AllocationSiteWithoutWeakNextMap"), + ("read_only_space", 0x08211): (137, "LoadHandler1Map"), + ("read_only_space", 0x08239): (137, "LoadHandler2Map"), + ("read_only_space", 0x08261): (137, "LoadHandler3Map"), + ("read_only_space", 0x08289): (138, "StoreHandler0Map"), + ("read_only_space", 0x082b1): (138, "StoreHandler1Map"), + ("read_only_space", 0x082d9): (138, "StoreHandler2Map"), + ("read_only_space", 0x08301): (138, "StoreHandler3Map"), ("old_space", 0x0438d): (2116, "ExternalMap"), ("old_space", 0x043b5): (2120, "JSMessageObjectMap"), } # List of known V8 objects. 
KNOWN_OBJECTS = { - ("read_only_space", 0x022a9): "EmptyFixedArray", - ("read_only_space", 0x022b1): "EmptyWeakFixedArray", - ("read_only_space", 0x022b9): "EmptyWeakArrayList", - ("read_only_space", 0x022c5): "NullValue", - ("read_only_space", 0x022e1): "UndefinedValue", - ("read_only_space", 0x022fd): "TheHoleValue", - ("read_only_space", 0x02a99): "EmptyEnumCache", - ("read_only_space", 0x02aa5): "EmptyDescriptorArray", - ("read_only_space", 0x03875): "InvalidPrototypeValidityCell", - ("read_only_space", 0x03f0d): "EmptyArrayList", - ("read_only_space", 0x03f19): "EmptyScopeInfo", - ("read_only_space", 0x03f29): "EmptyObjectBoilerplateDescription", - ("read_only_space", 0x03f35): "EmptyArrayBoilerplateDescription", - ("read_only_space", 0x03f41): "TrueValue", - ("read_only_space", 0x03f5d): "FalseValue", - ("read_only_space", 0x03f79): "EmptyByteArray", - ("read_only_space", 0x03f81): "EmptyPropertyArray", - ("read_only_space", 0x03f89): "EmptyClosureFeedbackCellArray", - ("read_only_space", 0x03f91): "NoOpInterceptorInfo", - ("read_only_space", 0x03fb9): "MinusZeroValue", - ("read_only_space", 0x03fc5): "NanValue", - ("read_only_space", 0x03fd1): "HoleNanValue", - ("read_only_space", 0x03fdd): "InfinityValue", - ("read_only_space", 0x03fe9): "MinusInfinityValue", - ("read_only_space", 0x03ff5): "MaxSafeInteger", - ("read_only_space", 0x04001): "MaxUInt32", - ("read_only_space", 0x0400d): "SmiMinValue", - ("read_only_space", 0x04019): "SmiMaxValuePlusOne", - ("read_only_space", 0x04025): "HashSeed", - ("read_only_space", 0x04035): "SingleCharacterStringTable", - ("read_only_space", 0x0543d): "empty_string", - ("read_only_space", 0x07b19): "UninitializedValue", - ("read_only_space", 0x07b51): "ArgumentsMarker", - ("read_only_space", 0x07b89): "TerminationException", - ("read_only_space", 0x07bc9): "Exception", - ("read_only_space", 0x07be5): "OptimizedOut", - ("read_only_space", 0x07c1d): "StaleRegister", - ("read_only_space", 0x07c55): "SelfReferenceMarker", - 
("read_only_space", 0x07c95): "BasicBlockCountersMarker", - ("read_only_space", 0x081c9): "EmptyPropertyDictionary", - ("read_only_space", 0x081f1): "EmptySymbolTable", - ("read_only_space", 0x0820d): "EmptySlowElementDictionary", - ("read_only_space", 0x08231): "EmptyOrderedHashMap", - ("read_only_space", 0x08245): "EmptyOrderedHashSet", - ("read_only_space", 0x08259): "EmptyOrderedPropertyDictionary", - ("read_only_space", 0x0827d): "EmptySwissPropertyDictionary", - ("read_only_space", 0x0829d): "EmptyFeedbackMetadata", - ("read_only_space", 0x082a9): "GlobalThisBindingScopeInfo", - ("read_only_space", 0x082c9): "EmptyFunctionScopeInfo", - ("read_only_space", 0x082ed): "NativeScopeInfo", - ("read_only_space", 0x08305): "ShadowRealmScopeInfo", - ("read_only_space", 0x0831d): "OffHeapTrampolineRelocationInfo", + ("read_only_space", 0x021e1): "EmptyWeakArrayList", + ("read_only_space", 0x021ed): "EmptyDescriptorArray", + ("read_only_space", 0x02225): "EmptyEnumCache", + ("read_only_space", 0x02259): "EmptyFixedArray", + ("read_only_space", 0x02261): "NullValue", + ("read_only_space", 0x02369): "UninitializedValue", + ("read_only_space", 0x023e1): "UndefinedValue", + ("read_only_space", 0x02425): "NanValue", + ("read_only_space", 0x02459): "TheHoleValue", + ("read_only_space", 0x02485): "HoleNanValue", + ("read_only_space", 0x024b9): "TrueValue", + ("read_only_space", 0x024f9): "FalseValue", + ("read_only_space", 0x02529): "empty_string", + ("read_only_space", 0x02765): "EmptyScopeInfo", + ("read_only_space", 0x0279d): "ArgumentsMarker", + ("read_only_space", 0x027fd): "Exception", + ("read_only_space", 0x02859): "TerminationException", + ("read_only_space", 0x028c1): "OptimizedOut", + ("read_only_space", 0x02921): "StaleRegister", + ("read_only_space", 0x034c1): "EmptyPropertyArray", + ("read_only_space", 0x034c9): "EmptyByteArray", + ("read_only_space", 0x034d1): "EmptyObjectBoilerplateDescription", + ("read_only_space", 0x03505): 
"EmptyArrayBoilerplateDescription", + ("read_only_space", 0x03511): "EmptyClosureFeedbackCellArray", + ("read_only_space", 0x03519): "EmptySlowElementDictionary", + ("read_only_space", 0x0353d): "EmptyOrderedHashMap", + ("read_only_space", 0x03551): "EmptyOrderedHashSet", + ("read_only_space", 0x03565): "EmptyFeedbackMetadata", + ("read_only_space", 0x03571): "EmptyPropertyDictionary", + ("read_only_space", 0x03599): "EmptyOrderedPropertyDictionary", + ("read_only_space", 0x035b1): "EmptySwissPropertyDictionary", + ("read_only_space", 0x03605): "NoOpInterceptorInfo", + ("read_only_space", 0x0362d): "EmptyArrayList", + ("read_only_space", 0x03639): "EmptyWeakFixedArray", + ("read_only_space", 0x03641): "InvalidPrototypeValidityCell", + ("read_only_space", 0x03649): "InfinityValue", + ("read_only_space", 0x03655): "MinusZeroValue", + ("read_only_space", 0x03661): "MinusInfinityValue", + ("read_only_space", 0x0366d): "MaxSafeInteger", + ("read_only_space", 0x03679): "MaxUInt32", + ("read_only_space", 0x03685): "SmiMinValue", + ("read_only_space", 0x03691): "SmiMaxValuePlusOne", + ("read_only_space", 0x0369d): "SingleCharacterStringTable", + ("read_only_space", 0x04aa5): "SelfReferenceMarker", + ("read_only_space", 0x04ae5): "BasicBlockCountersMarker", + ("read_only_space", 0x04b29): "OffHeapTrampolineRelocationInfo", + ("read_only_space", 0x04b35): "GlobalThisBindingScopeInfo", + ("read_only_space", 0x04b65): "EmptyFunctionScopeInfo", + ("read_only_space", 0x04b89): "NativeScopeInfo", + ("read_only_space", 0x04ba1): "ShadowRealmScopeInfo", + ("read_only_space", 0x04bb9): "EmptySymbolTable", + ("read_only_space", 0x04bd5): "HashSeed", ("old_space", 0x0423d): "ArgumentsIteratorAccessor", ("old_space", 0x04255): "ArrayLengthAccessor", ("old_space", 0x0426d): "BoundFunctionLengthAccessor", From e6d1bea299fe04264289fbd8d6d36a21f9eeb147 Mon Sep 17 00:00:00 2001 From: Andreas Haas Date: Wed, 28 Dec 2022 15:15:30 +0100 Subject: [PATCH 105/654] [wasm] Add histogram for 
WebAssembly compilation methods This histogram should give us information on which compilation methods get used in the wild. R=clemensb@chromium.org, mlippautz@chromium.org Change-Id: I6906580c1b8df7a1dff9ce6c169c6861936857d9 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128555 Reviewed-by: Clemens Backes Reviewed-by: Michael Lippautz Commit-Queue: Andreas Haas Cr-Commit-Position: refs/heads/main@{#85056} --- src/logging/counters-definitions.h | 3 ++- src/wasm/wasm-js.cc | 19 +++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/src/logging/counters-definitions.h b/src/logging/counters-definitions.h index ec69b8b69c..a96e837d4a 100644 --- a/src/logging/counters-definitions.h +++ b/src/logging/counters-definitions.h @@ -114,7 +114,8 @@ namespace internal { /* kPartialSuccessor kAbortedDuringSweeping. See */ \ /* ExternalPointerTable::TableCompactionOutcome enum for more details */ \ HR(external_pointer_table_compaction_outcome, \ - V8.ExternalPointerTableCompactionOutcome, 0, 2, 3) + V8.ExternalPointerTableCompactionOutcome, 0, 2, 3) \ + HR(wasm_compilation_method, V8.WasmCompilationMethod, 0, 4, 5) #define NESTED_TIMED_HISTOGRAM_LIST(HT) \ /* Nested timer histograms allow distributions of nested timed results. */ \ diff --git a/src/wasm/wasm-js.cc b/src/wasm/wasm-js.cc index 3d9f621535..5c4d6ed8ac 100644 --- a/src/wasm/wasm-js.cc +++ b/src/wasm/wasm-js.cc @@ -485,6 +485,20 @@ bool EnforceUint32(T argument_name, Local v, Local context, *res = static_cast(double_number); return true; } + +// The enum values need to match "WasmCompilationMethod" in +// tools/metrics/histograms/enums.xml. 
+enum CompilationMethod { + kSyncCompilation = 0, + kAsyncCompilation = 1, + kStreamingCompilation = 2, + kAsyncInstantiation = 3, + kStreamingInstantiation = 4, +}; + +void RecordCompilationMethod(i::Isolate* isolate, CompilationMethod method) { + isolate->counters()->wasm_compilation_method()->AddSample(method); +} } // namespace // WebAssembly.compile(bytes) -> Promise @@ -492,6 +506,7 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo& args) { constexpr const char* kAPIMethodName = "WebAssembly.compile()"; v8::Isolate* isolate = args.GetIsolate(); i::Isolate* i_isolate = reinterpret_cast(isolate); + RecordCompilationMethod(i_isolate, kAsyncCompilation); HandleScope scope(isolate); ScheduledErrorThrower thrower(i_isolate, kAPIMethodName); @@ -561,6 +576,7 @@ void WebAssemblyCompileStreaming( const v8::FunctionCallbackInfo& args) { v8::Isolate* isolate = args.GetIsolate(); i::Isolate* i_isolate = reinterpret_cast(isolate); + RecordCompilationMethod(i_isolate, kStreamingCompilation); HandleScope scope(isolate); const char* const kAPIMethodName = "WebAssembly.compileStreaming()"; ScheduledErrorThrower thrower(i_isolate, kAPIMethodName); @@ -680,6 +696,7 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo& args) { v8::Isolate* isolate = args.GetIsolate(); i::Isolate* i_isolate = reinterpret_cast(isolate); if (i_isolate->wasm_module_callback()(args)) return; + RecordCompilationMethod(i_isolate, kSyncCompilation); HandleScope scope(isolate); ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()"); @@ -795,6 +812,7 @@ void WebAssemblyModuleCustomSections( void WebAssemblyInstance(const v8::FunctionCallbackInfo& args) { Isolate* isolate = args.GetIsolate(); i::Isolate* i_isolate = reinterpret_cast(isolate); + RecordCompilationMethod(i_isolate, kAsyncInstantiation); i_isolate->CountUsage( v8::Isolate::UseCounterFeature::kWebAssemblyInstantiation); @@ -856,6 +874,7 @@ void WebAssemblyInstantiateStreaming( const v8::FunctionCallbackInfo& args) { 
v8::Isolate* isolate = args.GetIsolate(); i::Isolate* i_isolate = reinterpret_cast(isolate); + RecordCompilationMethod(i_isolate, kStreamingInstantiation); i_isolate->CountUsage( v8::Isolate::UseCounterFeature::kWebAssemblyInstantiation); From cabbc128e40a0ab747b1005200af0c5004b8c171 Mon Sep 17 00:00:00 2001 From: pthier Date: Mon, 2 Jan 2023 14:04:55 +0100 Subject: [PATCH 106/654] [string] Don't overwrite original string in InternalizedStringKey When internalizing external strings, a new internalized external string object is allocated if the string is not in-place internalizable. This newly allocated strings external resource is set to null (the actual resource will be transferred by MakeThin to ensure unique ownership of the resource). We need to preserve the original string in the InternalizedStringKey for the second lookup (inside the critical section), as we need to access the external resource in case of hash collisions to check for equality. Bug: chromium:1402187 Change-Id: I62b637859b06f05d1b34cb26495f08ec44d2f2db Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128089 Reviewed-by: Leszek Swirski Commit-Queue: Patrick Thier Cr-Commit-Position: refs/heads/main@{#85057} --- src/objects/string-table.cc | 17 ++++++++++----- test/cctest/test-strings.cc | 41 +++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 5 deletions(-) diff --git a/src/objects/string-table.cc b/src/objects/string-table.cc index f87e5eb8e6..00aa76a4a2 100644 --- a/src/objects/string-table.cc +++ b/src/objects/string-table.cc @@ -375,6 +375,7 @@ class InternalizedStringKey final : public StringTableKey { // We can see already internalized strings here only when sharing the // string table and allowing concurrent internalization. 
DCHECK(v8_flags.shared_string_table); + internalized_string_ = string_; return; } @@ -396,7 +397,7 @@ class InternalizedStringKey final : public StringTableKey { // original string is not transitioned to a ThinString (setting the // resource) immediately. DCHECK(!shape.IsShared()); - string_ = + internalized_string_ = isolate->factory()->InternalizeExternalString( string_); } else if (can_avoid_copy && shape.IsExternalTwoByte()) { @@ -406,13 +407,13 @@ class InternalizedStringKey final : public StringTableKey { // original string is not transitioned to a ThinString (setting the // resource) immediately. DCHECK(!shape.IsShared()); - string_ = + internalized_string_ = isolate->factory()->InternalizeExternalString( string_); } else { // Otherwise allocate a new internalized string. - string_ = isolate->factory()->NewInternalizedStringImpl(string_, length(), - raw_hash_field()); + internalized_string_ = isolate->factory()->NewInternalizedStringImpl( + string_, length(), raw_hash_field()); } } @@ -435,11 +436,17 @@ class InternalizedStringKey final : public StringTableKey { // in-place migrate the original string instead of internalizing the copy // and migrating the original string to a ThinString. This scenario doesn't // seem to be common enough to justify re-computing the strategy here. - return string_; + return internalized_string_.ToHandleChecked(); } private: Handle string_; + // Copy of the string to be internalized (only set if the string is not + // in-place internalizable). We can't override the original string, as + // internalized external strings don't set the resource directly (deferred to + // MakeThin to ensure unique ownership of the resource), and thus would break + // equality checks in case of hash collisions. 
+ MaybeHandle internalized_string_; MaybeHandle maybe_internalized_map_; }; diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc index 837d42669a..1f12175879 100644 --- a/test/cctest/test-strings.cc +++ b/test/cctest/test-strings.cc @@ -1393,6 +1393,47 @@ TEST(InternalizeExternal) { CcTest::CollectGarbage(i::OLD_SPACE); } +TEST(Regress1402187) { + CcTest::InitializeVM(); + i::Isolate* isolate = CcTest::i_isolate(); + Factory* factory = isolate->factory(); + // This won't leak; the external string mechanism will call Dispose() on it. + const char ext_string_content[] = "prop-1234567"; + OneByteVectorResource* resource = new OneByteVectorResource( + v8::base::Vector(ext_string_content, 12)); + const uint32_t fake_hash = + String::CreateHashFieldValue(4711, String::HashFieldType::kHash); + { + v8::HandleScope scope(CcTest::isolate()); + // Internalize a string with the same hash to ensure collision. + Handle intern = isolate->factory()->NewStringFromAsciiChecked( + "internalized", AllocationType::kOld); + intern->set_raw_hash_field(fake_hash); + factory->InternalizeName(intern); + CHECK(intern->IsInternalizedString()); + + v8::Local ext_string = + v8::String::NewFromUtf8Literal(CcTest::isolate(), ext_string_content); + ext_string->MakeExternal(resource); + Handle string = v8::Utils::OpenHandle(*ext_string); + string->set_raw_hash_field(fake_hash); + CHECK(string->IsExternalString()); + CHECK(!string->IsInternalizedString()); + CHECK(!String::Equals(isolate, string, intern)); + CHECK_EQ(string->hash(), intern->hash()); + CHECK_EQ(string->length(), intern->length()); + + CHECK_EQ(isolate->string_table()->TryStringToIndexOrLookupExisting( + isolate, string->ptr()), + Smi::FromInt(ResultSentinel::kNotFound).ptr()); + string = factory->InternalizeString(string); + CHECK(string->IsExternalString()); + CHECK(string->IsInternalizedString()); + } + CcTest::CollectGarbage(i::OLD_SPACE); + CcTest::CollectGarbage(i::OLD_SPACE); +} + TEST(SliceFromExternal) { if 
(!v8_flags.string_slices) return; CcTest::InitializeVM(); From 06e469190cd506419efde7c6c9f25a3823844f6c Mon Sep 17 00:00:00 2001 From: Leszek Swirski Date: Mon, 2 Jan 2023 15:22:20 +0100 Subject: [PATCH 107/654] [maglev] Fix RootConstant::ToBoolean for root heap nums ToBoolean was wrong for a couple of root heap numbers (namely, NaN, holey NaN, and minus zero. Fix this, and add an exhaustive test of root constant ToBoolean values. Bug: v8:7700 Change-Id: I6939c6eb5130cb8a3a4f7007b1a0a1dcc415e8b2 Fixed: chromium:1403740 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128524 Auto-Submit: Leszek Swirski Commit-Queue: Leszek Swirski Commit-Queue: Victor Gomes Reviewed-by: Victor Gomes Cr-Commit-Position: refs/heads/main@{#85058} --- src/maglev/maglev-ir.cc | 43 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 39 insertions(+), 4 deletions(-) diff --git a/src/maglev/maglev-ir.cc b/src/maglev/maglev-ir.cc index 21067a195d..4032ad8e42 100644 --- a/src/maglev/maglev-ir.cc +++ b/src/maglev/maglev-ir.cc @@ -138,13 +138,14 @@ size_t GetInputLocationsArraySize(const DeoptFrame& top_frame) { return size; } -} // namespace - -bool RootConstant::ToBoolean(LocalIsolate* local_isolate) const { - switch (index_) { +bool RootToBoolean(RootIndex index) { + switch (index) { case RootIndex::kFalseValue: case RootIndex::kNullValue: case RootIndex::kUndefinedValue: + case RootIndex::kNanValue: + case RootIndex::kHoleNanValue: + case RootIndex::kMinusZeroValue: case RootIndex::kempty_string: return false; default: @@ -152,6 +153,40 @@ bool RootConstant::ToBoolean(LocalIsolate* local_isolate) const { } } +#ifdef DEBUG +// For all RO roots, check that RootToBoolean returns the same value as +// BooleanValue on that root. +bool CheckToBooleanOnAllRoots(LocalIsolate* local_isolate) { + ReadOnlyRoots roots(local_isolate); + // Use the READ_ONLY_ROOT_LIST macro list rather than a for loop to get nicer + // error messages if there is a failure. 
+#define DO_CHECK(type, name, CamelName) \ + /* Ignore 'undefined' roots that are not the undefined value itself. */ \ + if (roots.name() != roots.undefined_value() || \ + RootIndex::k##CamelName == RootIndex::kUndefinedValue) { \ + DCHECK_EQ(roots.name().BooleanValue(local_isolate), \ + RootToBoolean(RootIndex::k##CamelName)); \ + } + READ_ONLY_ROOT_LIST(DO_CHECK) +#undef DO_CHECK + return true; +} +#endif + +} // namespace + +bool RootConstant::ToBoolean(LocalIsolate* local_isolate) const { +#ifdef DEBUG + // (Ab)use static locals to call CheckToBooleanOnAllRoots once, on first + // call to this function. + static bool check_once = CheckToBooleanOnAllRoots(local_isolate); + DCHECK(check_once); +#endif + // ToBoolean is only supported for RO roots. + DCHECK(RootsTable::IsReadOnly(index_)); + return RootToBoolean(index_); +} + DeoptInfo::DeoptInfo(Zone* zone, DeoptFrame top_frame, compiler::FeedbackSource feedback_to_update) : top_frame_(top_frame), From 3d921a0afb304c3be1726b7ea9b02bfd242c9bbd Mon Sep 17 00:00:00 2001 From: Darius M Date: Mon, 2 Jan 2023 14:14:58 +0100 Subject: [PATCH 108/654] [maglev][arm64] Implement StoreMap/StoreDoubleField IRs Bug: v8:7700 Change-Id: Ic2bb43b4fb34aa6221bf04d90517ecc3a3482b8a Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128523 Auto-Submit: Darius Mercadier Commit-Queue: Victor Gomes Reviewed-by: Victor Gomes Cr-Commit-Position: refs/heads/main@{#85059} --- src/maglev/arm64/maglev-ir-arm64.cc | 78 +++++++++++++++++++++++++++-- 1 file changed, 75 insertions(+), 3 deletions(-) diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index f4dc142fa1..fad9c0d886 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -135,8 +135,6 @@ UNIMPLEMENTED_NODE(CheckJSDataViewBounds, element_type_) UNIMPLEMENTED_NODE(CheckJSObjectElementsBounds) UNIMPLEMENTED_NODE(CheckJSTypedArrayBounds, elements_kind_) UNIMPLEMENTED_NODE_WITH_CALL(JumpLoopPrologue, 
loop_depth_, unit_) -UNIMPLEMENTED_NODE_WITH_CALL(StoreMap) -UNIMPLEMENTED_NODE(StoreDoubleField) UNIMPLEMENTED_NODE(StoreSignedIntDataViewElement, type_) UNIMPLEMENTED_NODE(StoreDoubleDataViewElement) @@ -1239,7 +1237,6 @@ void CheckInt32IsSmi::GenerateCode(MaglevAssembler* masm, void CheckedSmiTagInt32::SetValueLocationConstraints() { UseRegister(input()); DefineAsRegister(this); - set_temporaries_needed(1); } void CheckedSmiTagInt32::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { @@ -1640,6 +1637,81 @@ void LoadDoubleElement::GenerateCode(MaglevAssembler* masm, FieldMemOperand(elements, FixedArray::kHeaderSize)); } +void StoreDoubleField::SetValueLocationConstraints() { + UseRegister(object_input()); + UseRegister(value_input()); +} +void StoreDoubleField::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register object = ToRegister(object_input()); + DoubleRegister value = ToDoubleRegister(value_input()); + + UseScratchRegisterScope temps(masm); + Register tmp = temps.AcquireX(); + + __ AssertNotSmi(object); + __ DecompressAnyTagged(tmp, FieldMemOperand(object, offset())); + __ AssertNotSmi(tmp); + __ Move(FieldMemOperand(tmp, HeapNumber::kValueOffset), value); +} + +int StoreMap::MaxCallStackArgs() const { + return WriteBarrierDescriptor::GetStackParameterCount(); +} +void StoreMap::SetValueLocationConstraints() { + UseFixed(object_input(), WriteBarrierDescriptor::ObjectRegister()); + set_temporaries_needed(1); +} +void StoreMap::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + // TODO(leszeks): Consider making this an arbitrary register and push/popping + // in the deferred path. + Register object = WriteBarrierDescriptor::ObjectRegister(); + DCHECK_EQ(object, ToRegister(object_input())); + + __ AssertNotSmi(object); + // Since {value} will be passed to deferred code, we have to use a general + // temporary for it, rather than the regular scratch registers. 
+ Register value = general_temporaries().PopFirst(); + __ Move(value, map_.object()); + __ StoreTaggedField(value, FieldMemOperand(object, HeapObject::kMapOffset)); + + ZoneLabelRef done(masm); + DeferredCodeInfo* deferred_write_barrier = __ PushDeferredCode( + [](MaglevAssembler* masm, ZoneLabelRef done, Register value, + Register object, StoreMap* node) { + ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path"); + __ CheckPageFlag( + value, MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, + eq, *done); + + Register slot_reg = WriteBarrierDescriptor::SlotAddressRegister(); + RegList saved; + if (node->register_snapshot().live_registers.has(slot_reg)) { + saved.set(slot_reg); + } + + __ PushAll(saved); + __ Add(slot_reg, object, HeapObject::kMapOffset - kHeapObjectTag); + + SaveFPRegsMode const save_fp_mode = + !node->register_snapshot().live_double_registers.is_empty() + ? SaveFPRegsMode::kSave + : SaveFPRegsMode::kIgnore; + + __ CallRecordWriteStub(object, slot_reg, save_fp_mode); + + __ PopAll(saved); + __ B(*done); + }, + done, value, object, this); + + __ JumpIfSmi(value, *done); + __ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, ne, + &deferred_write_barrier->deferred_code_label); + __ bind(*done); +} + int StoreTaggedFieldWithWriteBarrier::MaxCallStackArgs() const { return WriteBarrierDescriptor::GetStackParameterCount(); } From d43b93a7ac93bbc5ac00bc5542222fa08757d19d Mon Sep 17 00:00:00 2001 From: Leszek Swirski Date: Mon, 2 Jan 2023 15:21:19 +0100 Subject: [PATCH 109/654] [maglev] Use NaN for StoreDataView with no args. StoreDataView with no args should store NaN, not zero. 
Bug: v8:7700 Change-Id: I9688465fea2ac1a88f0bff2a7b7d1c419dc7e43e Fixed: chromium:1403743 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4127165 Commit-Queue: Leszek Swirski Auto-Submit: Leszek Swirski Commit-Queue: Victor Gomes Reviewed-by: Victor Gomes Cr-Commit-Position: refs/heads/main@{#85060} --- src/maglev/maglev-graph-builder.cc | 4 +++- src/maglev/maglev-graph-builder.h | 2 +- src/maglev/maglev-graph-processor.h | 4 +++- src/maglev/maglev-regalloc.cc | 4 +++- 4 files changed, 10 insertions(+), 4 deletions(-) diff --git a/src/maglev/maglev-graph-builder.cc b/src/maglev/maglev-graph-builder.cc index ee389cbafe..9ad23aa434 100644 --- a/src/maglev/maglev-graph-builder.cc +++ b/src/maglev/maglev-graph-builder.cc @@ -3106,7 +3106,9 @@ ValueNode* MaglevGraphBuilder::TryReduceDataViewPrototypeSetFloat64( compiler::JSFunctionRef target, CallArguments& args) { return TryBuildStoreDataView( args, ExternalArrayType::kExternalFloat64Array, [&](ValueNode* value) { - return value ? GetFloat64(value) : GetFloat64Constant(0); + return value ? 
GetFloat64(value) + : GetFloat64Constant( + std::numeric_limits::quiet_NaN()); }); } diff --git a/src/maglev/maglev-graph-builder.h b/src/maglev/maglev-graph-builder.h index 7410665822..195f9ee59e 100644 --- a/src/maglev/maglev-graph-builder.h +++ b/src/maglev/maglev-graph-builder.h @@ -628,7 +628,7 @@ class MaglevGraphBuilder { } Float64Constant* GetFloat64Constant(double constant) { - if (constant != constant) { + if (std::isnan(constant)) { if (graph_->nan() == nullptr) { graph_->set_nan(CreateNewNode(0, constant)); } diff --git a/src/maglev/maglev-graph-processor.h b/src/maglev/maglev-graph-processor.h index b6ff380cf5..9243d6cf15 100644 --- a/src/maglev/maglev-graph-processor.h +++ b/src/maglev/maglev-graph-processor.h @@ -90,7 +90,9 @@ class GraphProcessor { node_processor_.Process(constant, GetCurrentState()); USE(index); } - + if (graph_->nan()) { + node_processor_.Process(graph_->nan(), GetCurrentState()); + } for (const auto& [address, constant] : graph->external_references()) { node_processor_.Process(constant, GetCurrentState()); USE(address); diff --git a/src/maglev/maglev-regalloc.cc b/src/maglev/maglev-regalloc.cc index 19f0cf5b68..87fe6045e0 100644 --- a/src/maglev/maglev-regalloc.cc +++ b/src/maglev/maglev-regalloc.cc @@ -333,7 +333,9 @@ void StraightForwardRegisterAllocator::AllocateRegisters() { constant->SetConstantLocation(); USE(value); } - + if (graph_->nan()) { + graph_->nan()->SetConstantLocation(); + } for (const auto& [address, constant] : graph_->external_references()) { constant->SetConstantLocation(); USE(address); From a8a1805e126d45ea635f332df26f786c67787419 Mon Sep 17 00:00:00 2001 From: Michael Lippautz Date: Mon, 2 Jan 2023 15:12:15 +0100 Subject: [PATCH 110/654] [api, heap, handles] Remove deprecated EmbedderHeapTracer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This removes EmbedderHeapTracer from V8's API. 
Going forward v8::TracedReference is only supported with using CppHeap (Oilpan). Bug: v8:13207 Change-Id: I4e0efa94890ed147293b5df69fd7e0edad45abb5 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4111546 Reviewed-by: Dominik Inführ Commit-Queue: Michael Lippautz Cr-Commit-Position: refs/heads/main@{#85061} --- include/v8-embedder-heap.h | 169 ----- include/v8-isolate.h | 23 +- include/v8-traced-handle.h | 15 +- src/DEPS | 1 + src/api/api.cc | 81 --- src/handles/traced-handles.cc | 36 +- src/handles/traced-handles.h | 8 - src/heap/cppgc-js/cpp-heap.cc | 3 +- src/heap/embedder-tracing.cc | 156 +---- src/heap/embedder-tracing.h | 96 +-- src/heap/gc-tracer.h | 4 +- src/heap/heap.cc | 40 -- src/heap/heap.h | 10 - src/heap/incremental-marking.cc | 22 +- src/heap/mark-compact-inl.h | 10 - src/heap/mark-compact.cc | 27 +- src/heap/mark-compact.h | 3 - test/unittests/BUILD.gn | 1 - .../heap/embedder-tracing-unittest.cc | 607 ------------------ test/unittests/heap/heap-utils.h | 20 - 20 files changed, 68 insertions(+), 1264 deletions(-) delete mode 100644 test/unittests/heap/embedder-tracing-unittest.cc diff --git a/include/v8-embedder-heap.h b/include/v8-embedder-heap.h index f994cdfdf3..9e2e3ef58c 100644 --- a/include/v8-embedder-heap.h +++ b/include/v8-embedder-heap.h @@ -5,27 +5,14 @@ #ifndef INCLUDE_V8_EMBEDDER_HEAP_H_ #define INCLUDE_V8_EMBEDDER_HEAP_H_ -#include -#include - -#include -#include - -#include "cppgc/common.h" -#include "v8-local-handle.h" // NOLINT(build/include_directory) #include "v8-traced-handle.h" // NOLINT(build/include_directory) #include "v8config.h" // NOLINT(build/include_directory) namespace v8 { -class Data; class Isolate; class Value; -namespace internal { -class LocalEmbedderHeapTracer; -} // namespace internal - /** * Handler for embedder roots on non-unified heap garbage collections. 
*/ @@ -62,162 +49,6 @@ class V8_EXPORT EmbedderRootsHandler { virtual void ResetRoot(const v8::TracedReference& handle) = 0; }; -/** - * Interface for tracing through the embedder heap. During a V8 garbage - * collection, V8 collects hidden fields of all potential wrappers, and at the - * end of its marking phase iterates the collection and asks the embedder to - * trace through its heap and use reporter to report each JavaScript object - * reachable from any of the given wrappers. - */ -class V8_EXPORT -// GCC doesn't like combining __attribute__(()) with [[deprecated]]. -#ifdef __clang__ -V8_DEPRECATED("Use CppHeap when working with v8::TracedReference.") -#endif // __clang__ - EmbedderHeapTracer { - public: - using EmbedderStackState = cppgc::EmbedderStackState; - - enum TraceFlags : uint64_t { - kNoFlags = 0, - kReduceMemory = 1 << 0, - kForced = 1 << 2, - }; - - /** - * Interface for iterating through |TracedReference| handles. - */ - class V8_EXPORT TracedGlobalHandleVisitor { - public: - virtual ~TracedGlobalHandleVisitor() = default; - virtual void VisitTracedReference(const TracedReference& handle) {} - }; - - /** - * Summary of a garbage collection cycle. See |TraceEpilogue| on how the - * summary is reported. - */ - struct TraceSummary { - /** - * Time spent managing the retained memory in milliseconds. This can e.g. - * include the time tracing through objects in the embedder. - */ - double time = 0.0; - - /** - * Memory retained by the embedder through the |EmbedderHeapTracer| - * mechanism in bytes. - */ - size_t allocated_size = 0; - }; - - virtual ~EmbedderHeapTracer() = default; - - /** - * Iterates all |TracedReference| handles created for the |v8::Isolate| the - * tracer is attached to. - */ - void IterateTracedGlobalHandles(TracedGlobalHandleVisitor* visitor); - - /** - * Called by the embedder to set the start of the stack which is e.g. used by - * V8 to determine whether handles are used from stack or heap. 
- */ - void SetStackStart(void* stack_start); - - /** - * Called by v8 to register internal fields of found wrappers. - * - * The embedder is expected to store them somewhere and trace reachable - * wrappers from them when called through |AdvanceTracing|. - */ - virtual void RegisterV8References( - const std::vector>& embedder_fields) = 0; - - void RegisterEmbedderReference(const BasicTracedReference& ref); - - /** - * Called at the beginning of a GC cycle. - */ - virtual void TracePrologue(TraceFlags flags) {} - - /** - * Called to advance tracing in the embedder. - * - * The embedder is expected to trace its heap starting from wrappers reported - * by RegisterV8References method, and report back all reachable wrappers. - * Furthermore, the embedder is expected to stop tracing by the given - * deadline. A deadline of infinity means that tracing should be finished. - * - * Returns |true| if tracing is done, and false otherwise. - */ - virtual bool AdvanceTracing(double deadline_in_ms) = 0; - - /* - * Returns true if there no more tracing work to be done (see AdvanceTracing) - * and false otherwise. - */ - virtual bool IsTracingDone() = 0; - - /** - * Called at the end of a GC cycle. - * - * Note that allocation is *not* allowed within |TraceEpilogue|. Can be - * overriden to fill a |TraceSummary| that is used by V8 to schedule future - * garbage collections. - */ - virtual void TraceEpilogue(TraceSummary* trace_summary) {} - - /** - * Called upon entering the final marking pause. No more incremental marking - * steps will follow this call. - */ - virtual void EnterFinalPause(EmbedderStackState stack_state) = 0; - - /* - * Called by the embedder to request immediate finalization of the currently - * running tracing phase that has been started with TracePrologue and not - * yet finished with TraceEpilogue. - * - * Will be a noop when currently not in tracing. - * - * This is an experimental feature. 
- */ - void FinalizeTracing(); - - /** - * See documentation on EmbedderRootsHandler. - */ - virtual bool IsRootForNonTracingGC( - const v8::TracedReference& handle); - - /** - * See documentation on EmbedderRootsHandler. - */ - virtual void ResetHandleInNonTracingGC( - const v8::TracedReference& handle); - - /* - * Called by the embedder to signal newly allocated or freed memory. Not bound - * to tracing phases. Embedders should trade off when increments are reported - * as V8 may consult global heuristics on whether to trigger garbage - * collection on this change. - */ - void IncreaseAllocatedSize(size_t bytes); - void DecreaseAllocatedSize(size_t bytes); - - /* - * Returns the v8::Isolate this tracer is attached too and |nullptr| if it - * is not attached to any v8::Isolate. - */ - v8::Isolate* isolate() const { return v8_isolate_; } - - protected: - v8::Isolate* v8_isolate_ = nullptr; - - friend class internal::LocalEmbedderHeapTracer; -}; - } // namespace v8 #endif // INCLUDE_V8_EMBEDDER_HEAP_H_ diff --git a/include/v8-isolate.h b/include/v8-isolate.h index 9659300751..bae459630b 100644 --- a/include/v8-isolate.h +++ b/include/v8-isolate.h @@ -924,27 +924,10 @@ class V8_EXPORT Isolate { void RemoveGCPrologueCallback(GCCallbackWithData, void* data = nullptr); void RemoveGCPrologueCallback(GCCallback callback); - START_ALLOW_USE_DEPRECATED() - /** - * Sets the embedder heap tracer for the isolate. - * SetEmbedderHeapTracer cannot be used simultaneously with AttachCppHeap. - */ - void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer); - - /* - * Gets the currently active heap tracer for the isolate that was set with - * SetEmbedderHeapTracer. - */ - EmbedderHeapTracer* GetEmbedderHeapTracer(); - END_ALLOW_USE_DEPRECATED() - /** * Sets an embedder roots handle that V8 should consider when performing - * non-unified heap garbage collections. - * - * Using only EmbedderHeapTracer automatically sets up a default handler. 
- * The intended use case is for setting a custom handler after invoking - * `AttachCppHeap()`. + * non-unified heap garbage collections. The intended use case is for setting + * a custom handler after invoking `AttachCppHeap()`. * * V8 does not take ownership of the handler. */ @@ -955,8 +938,6 @@ class V8_EXPORT Isolate { * embedder maintains ownership of the CppHeap. At most one C++ heap can be * attached to V8. * - * AttachCppHeap cannot be used simultaneously with SetEmbedderHeapTracer. - * * Multi-threaded use requires the use of v8::Locker/v8::Unlocker, see * CppHeap. */ diff --git a/include/v8-traced-handle.h b/include/v8-traced-handle.h index e0fd57c49d..784016b37c 100644 --- a/include/v8-traced-handle.h +++ b/include/v8-traced-handle.h @@ -117,11 +117,11 @@ class TracedReferenceBase { /** * A traced handle with copy and move semantics. The handle is to be used - * together with |v8::EmbedderHeapTracer| or as part of GarbageCollected objects - * (see v8-cppgc.h) and specifies edges from C++ objects to JavaScript. + * together as part of GarbageCollected objects (see v8-cppgc.h) or from stack + * and specifies edges from C++ objects to JavaScript. * * The exact semantics are: - * - Tracing garbage collections use |v8::EmbedderHeapTracer| or cppgc. + * - Tracing garbage collections using CppHeap. * - Non-tracing garbage collections refer to * |v8::EmbedderRootsHandler::IsRoot()| whether the handle should * be treated as root or not. @@ -166,7 +166,6 @@ class BasicTracedReference : public TracedReferenceBase { Isolate* isolate, T* that, void* slot, internal::GlobalHandleStoreMode store_mode); - friend class EmbedderHeapTracer; template friend class Local; friend class Object; @@ -181,13 +180,7 @@ class BasicTracedReference : public TracedReferenceBase { /** * A traced handle without destructor that clears the handle. The embedder needs * to ensure that the handle is not accessed once the V8 object has been - * reclaimed. 
This can happen when the handle is not passed through the - * EmbedderHeapTracer. For more details see BasicTracedReference. - * - * The reference assumes the embedder has precise knowledge about references at - * all times. In case V8 needs to separately handle on-stack references, the - * embedder is required to set the stack start through - * |EmbedderHeapTracer::SetStackStart|. + * reclaimed. For more details see BasicTracedReference. */ template class TracedReference : public BasicTracedReference { diff --git a/src/DEPS b/src/DEPS index 8912d7fb25..aeb32f23a1 100644 --- a/src/DEPS +++ b/src/DEPS @@ -76,6 +76,7 @@ include_rules = [ "+starboard", # Using cppgc inside v8 is not (yet) allowed. "-include/cppgc", + "+include/cppgc/common.h", "+include/cppgc/platform.h", "+include/cppgc/source-location.h", ] diff --git a/src/api/api.cc b/src/api/api.cc index bfba6d66c7..de12398f39 100644 --- a/src/api/api.cc +++ b/src/api/api.cc @@ -8874,21 +8874,6 @@ void Isolate::RemoveGCEpilogueCallback(GCCallback callback) { RemoveGCEpilogueCallback(CallGCCallbackWithoutData, data); } -START_ALLOW_USE_DEPRECATED() - -void Isolate::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) { - i::Isolate* i_isolate = reinterpret_cast(this); - CHECK_NULL(i_isolate->heap()->cpp_heap()); - i_isolate->heap()->SetEmbedderHeapTracer(tracer); -} - -EmbedderHeapTracer* Isolate::GetEmbedderHeapTracer() { - i::Isolate* i_isolate = reinterpret_cast(this); - return i_isolate->heap()->GetEmbedderHeapTracer(); -} - -END_ALLOW_USE_DEPRECATED() - void Isolate::SetEmbedderRootsHandler(EmbedderRootsHandler* handler) { i::Isolate* i_isolate = reinterpret_cast(this); i_isolate->heap()->SetEmbedderRootsHandler(handler); @@ -8896,7 +8881,6 @@ void Isolate::SetEmbedderRootsHandler(EmbedderRootsHandler* handler) { void Isolate::AttachCppHeap(CppHeap* cpp_heap) { i::Isolate* i_isolate = reinterpret_cast(this); - CHECK_NULL(GetEmbedderHeapTracer()); i_isolate->heap()->AttachCppHeap(cpp_heap); } @@ -10705,71 
+10689,6 @@ void HeapProfiler::SetGetDetachednessCallback(GetDetachednessCallback callback, data); } -void EmbedderHeapTracer::SetStackStart(void* stack_start) { - CHECK(v8_isolate_); - reinterpret_cast(v8_isolate_) - ->heap() - ->SetStackStart(stack_start); -} - -void EmbedderHeapTracer::FinalizeTracing() { - if (v8_isolate_) { - i::Isolate* i_isolate = reinterpret_cast(v8_isolate_); - if (i_isolate->heap()->incremental_marking()->IsMarking()) { - i_isolate->heap()->FinalizeIncrementalMarkingAtomically( - i::GarbageCollectionReason::kExternalFinalize); - } - } -} - -void EmbedderHeapTracer::IncreaseAllocatedSize(size_t bytes) { - if (v8_isolate_) { - i::LocalEmbedderHeapTracer* const tracer = - reinterpret_cast(v8_isolate_) - ->heap() - ->local_embedder_heap_tracer(); - DCHECK_NOT_NULL(tracer); - tracer->IncreaseAllocatedSize(bytes); - } -} - -void EmbedderHeapTracer::DecreaseAllocatedSize(size_t bytes) { - if (v8_isolate_) { - i::LocalEmbedderHeapTracer* const tracer = - reinterpret_cast(v8_isolate_) - ->heap() - ->local_embedder_heap_tracer(); - DCHECK_NOT_NULL(tracer); - tracer->DecreaseAllocatedSize(bytes); - } -} - -void EmbedderHeapTracer::RegisterEmbedderReference( - const BasicTracedReference& ref) { - if (ref.IsEmpty()) return; - - i::Heap* const heap = reinterpret_cast(v8_isolate_)->heap(); - heap->RegisterExternallyReferencedObject( - reinterpret_cast(ref.val_)); -} - -void EmbedderHeapTracer::IterateTracedGlobalHandles( - TracedGlobalHandleVisitor* visitor) { - i::Isolate* i_isolate = reinterpret_cast(v8_isolate_); - i::DisallowGarbageCollection no_gc; - i_isolate->traced_handles()->Iterate(visitor); -} - -bool EmbedderHeapTracer::IsRootForNonTracingGC( - const v8::TracedReference& handle) { - return true; -} - -void EmbedderHeapTracer::ResetHandleInNonTracingGC( - const v8::TracedReference& handle) { - UNREACHABLE(); -} - EmbedderStateScope::EmbedderStateScope(Isolate* v8_isolate, Local context, EmbedderStateTag tag) diff --git 
a/src/handles/traced-handles.cc b/src/handles/traced-handles.cc index 4c2961d600..82e5a3eb6c 100644 --- a/src/handles/traced-handles.cc +++ b/src/handles/traced-handles.cc @@ -539,12 +539,6 @@ class TracedHandlesImpl final { size_t used_size_bytes() const { return sizeof(TracedNode) * used_nodes_; } size_t total_size_bytes() const { return block_size_bytes_; } - START_ALLOW_USE_DEPRECATED() - - void Iterate(v8::EmbedderHeapTracer::TracedGlobalHandleVisitor* visitor); - - END_ALLOW_USE_DEPRECATED() - private: TracedNode* AllocateNode(); void FreeNode(TracedNode*); @@ -894,6 +888,8 @@ void TracedHandlesImpl::ComputeWeaknessForYoungObjects( if (is_marking_) return; auto* const handler = isolate_->heap()->GetEmbedderRootsHandler(); + if (!handler) return; + for (TracedNode* node : young_nodes_) { if (node->is_in_use()) { DCHECK(node->is_root()); @@ -912,6 +908,8 @@ void TracedHandlesImpl::ProcessYoungObjects( if (!v8_flags.reclaim_unmodified_wrappers) return; auto* const handler = isolate_->heap()->GetEmbedderRootsHandler(); + if (!handler) return; + for (TracedNode* node : young_nodes_) { if (!node->is_in_use()) continue; @@ -996,23 +994,6 @@ void TracedHandlesImpl::IterateYoungRootsWithOldHostsForTesting( } } -START_ALLOW_USE_DEPRECATED() - -void TracedHandlesImpl::Iterate( - v8::EmbedderHeapTracer::TracedGlobalHandleVisitor* visitor) { - for (auto* block : blocks_) { - for (auto* node : *block) { - if (node->is_in_use()) { - v8::Value* value = ToApi(node->handle()); - visitor->VisitTracedReference( - *reinterpret_cast*>(&value)); - } - } - } -} - -END_ALLOW_USE_DEPRECATED() - TracedHandles::TracedHandles(Isolate* isolate) : impl_(std::make_unique(isolate)) {} @@ -1092,15 +1073,6 @@ size_t TracedHandles::used_size_bytes() const { return impl_->used_size_bytes(); } -START_ALLOW_USE_DEPRECATED() - -void TracedHandles::Iterate( - v8::EmbedderHeapTracer::TracedGlobalHandleVisitor* visitor) { - impl_->Iterate(visitor); -} - -END_ALLOW_USE_DEPRECATED() - // static void 
TracedHandles::Destroy(Address* location) { if (!location) return; diff --git a/src/handles/traced-handles.h b/src/handles/traced-handles.h index 9d62298a9b..c30230da74 100644 --- a/src/handles/traced-handles.h +++ b/src/handles/traced-handles.h @@ -76,14 +76,6 @@ class V8_EXPORT_PRIVATE TracedHandles final { void IterateAndMarkYoungRootsWithOldHosts(RootVisitor*); void IterateYoungRootsWithOldHostsForTesting(RootVisitor*); - START_ALLOW_USE_DEPRECATED() - - // Iterates over all traces handles represented by - // `v8::TracedReferenceBase`. - void Iterate(v8::EmbedderHeapTracer::TracedGlobalHandleVisitor* visitor); - - END_ALLOW_USE_DEPRECATED() - size_t used_node_count() const; size_t total_size_bytes() const; size_t used_size_bytes() const; diff --git a/src/heap/cppgc-js/cpp-heap.cc b/src/heap/cppgc-js/cpp-heap.cc index e63084381e..67a33bd83c 100644 --- a/src/heap/cppgc-js/cpp-heap.cc +++ b/src/heap/cppgc-js/cpp-heap.cc @@ -548,8 +548,7 @@ void CppHeap::DetachIsolate() { // CHECK across all relevant embedders and setups. if (!isolate_) return; - // Delegate to existing EmbedderHeapTracer API to finish any ongoing garbage - // collection. + // Finish any ongoing garbage collection. 
if (isolate_->heap()->incremental_marking()->IsMarking()) { isolate_->heap()->FinalizeIncrementalMarkingAtomically( i::GarbageCollectionReason::kExternalFinalize); diff --git a/src/heap/embedder-tracing.cc b/src/heap/embedder-tracing.cc index 1ac9d44a31..9f4ddf5ec2 100644 --- a/src/heap/embedder-tracing.cc +++ b/src/heap/embedder-tracing.cc @@ -4,6 +4,7 @@ #include "src/heap/embedder-tracing.h" +#include "include/cppgc/common.h" #include "include/v8-cppgc.h" #include "src/base/logging.h" #include "src/handles/global-handles.h" @@ -15,51 +16,34 @@ namespace v8::internal { START_ALLOW_USE_DEPRECATED() -void LocalEmbedderHeapTracer::SetRemoteTracer(EmbedderHeapTracer* tracer) { - CHECK_NULL(cpp_heap_); - if (remote_tracer_) remote_tracer_->v8_isolate_ = nullptr; - - remote_tracer_ = tracer; - default_embedder_roots_handler_.SetTracer(tracer); - if (remote_tracer_) - remote_tracer_->v8_isolate_ = reinterpret_cast(isolate_); -} - void LocalEmbedderHeapTracer::SetCppHeap(CppHeap* cpp_heap) { - CHECK_NULL(remote_tracer_); cpp_heap_ = cpp_heap; } -namespace { -CppHeap::GarbageCollectionFlags ConvertTraceFlags( - EmbedderHeapTracer::TraceFlags flags) { - CppHeap::GarbageCollectionFlags result; - if (flags & EmbedderHeapTracer::TraceFlags::kForced) - result |= CppHeap::GarbageCollectionFlagValues::kForced; - if (flags & EmbedderHeapTracer::TraceFlags::kReduceMemory) - result |= CppHeap::GarbageCollectionFlagValues::kReduceMemory; - return result; -} -} // namespace +void LocalEmbedderHeapTracer::PrepareForTrace(CollectionType type) { + if (!InUse()) return; -void LocalEmbedderHeapTracer::PrepareForTrace( - EmbedderHeapTracer::TraceFlags flags, CollectionType type) { - if (cpp_heap_) - cpp_heap()->InitializeTracing(type == CollectionType::kMajor - ? 
cppgc::internal::CollectionType::kMajor - : cppgc::internal::CollectionType::kMinor, - ConvertTraceFlags(flags)); + CppHeap::GarbageCollectionFlags flags = + CppHeap::GarbageCollectionFlagValues::kNoFlags; + auto* heap = isolate_->heap(); + if (heap->is_current_gc_forced()) { + flags |= CppHeap::GarbageCollectionFlagValues::kForced; + } + if (heap->ShouldReduceMemory()) { + flags |= CppHeap::GarbageCollectionFlagValues::kReduceMemory; + } + cpp_heap()->InitializeTracing(type == CollectionType::kMajor + ? cppgc::internal::CollectionType::kMajor + : cppgc::internal::CollectionType::kMinor, + flags); } -void LocalEmbedderHeapTracer::TracePrologue( - EmbedderHeapTracer::TraceFlags flags) { +void LocalEmbedderHeapTracer::TracePrologue() { if (!InUse()) return; embedder_worklist_empty_ = false; - if (cpp_heap_) - cpp_heap()->StartTracing(); - else - remote_tracer_->TracePrologue(flags); + + cpp_heap()->StartTracing(); } void LocalEmbedderHeapTracer::TraceEpilogue() { @@ -67,16 +51,9 @@ void LocalEmbedderHeapTracer::TraceEpilogue() { // Resetting to state unknown as there may be follow up garbage collections // triggered from callbacks that have a different stack state. 
- embedder_stack_state_ = - EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers; + embedder_stack_state_ = cppgc::EmbedderStackState::kMayContainHeapPointers; - if (cpp_heap_) { - cpp_heap()->TraceEpilogue(); - } else { - EmbedderHeapTracer::TraceSummary summary; - remote_tracer_->TraceEpilogue(&summary); - UpdateRemoteStats(summary.allocated_size, summary.time); - } + cpp_heap()->TraceEpilogue(); } void LocalEmbedderHeapTracer::UpdateRemoteStats(size_t allocated_size, @@ -94,41 +71,21 @@ void LocalEmbedderHeapTracer::UpdateRemoteStats(size_t allocated_size, void LocalEmbedderHeapTracer::EnterFinalPause() { if (!InUse()) return; - if (cpp_heap_) - cpp_heap()->EnterFinalPause(embedder_stack_state_); - else - remote_tracer_->EnterFinalPause(embedder_stack_state_); + cpp_heap()->EnterFinalPause(embedder_stack_state_); } bool LocalEmbedderHeapTracer::Trace(double max_duration) { - if (!InUse()) return true; - - return cpp_heap_ ? cpp_heap_->AdvanceTracing(max_duration) - : remote_tracer_->AdvanceTracing(max_duration); + return !InUse() || cpp_heap()->AdvanceTracing(max_duration); } bool LocalEmbedderHeapTracer::IsRemoteTracingDone() { - return !InUse() || (cpp_heap_ ? 
cpp_heap()->IsTracingDone() - : remote_tracer_->IsTracingDone()); -} - -LocalEmbedderHeapTracer::ProcessingScope::ProcessingScope( - LocalEmbedderHeapTracer* tracer) - : tracer_(tracer), wrapper_descriptor_(tracer->wrapper_descriptor_) { - DCHECK(!tracer_->cpp_heap_); - wrapper_cache_.reserve(kWrapperCacheSize); -} - -LocalEmbedderHeapTracer::ProcessingScope::~ProcessingScope() { - DCHECK(!tracer_->cpp_heap_); - if (!wrapper_cache_.empty()) { - tracer_->remote_tracer_->RegisterV8References(std::move(wrapper_cache_)); - } + return !InUse() || cpp_heap()->IsTracingDone(); } LocalEmbedderHeapTracer::WrapperInfo LocalEmbedderHeapTracer::ExtractWrapperInfo(Isolate* isolate, JSObject js_object) { + DCHECK(InUse()); WrapperInfo info; if (ExtractWrappableInfo(isolate, js_object, wrapper_descriptor(), &info)) { return info; @@ -136,32 +93,6 @@ LocalEmbedderHeapTracer::ExtractWrapperInfo(Isolate* isolate, return {nullptr, nullptr}; } -void LocalEmbedderHeapTracer::ProcessingScope::TracePossibleWrapper( - JSObject js_object) { - DCHECK(js_object.MayHaveEmbedderFields()); - WrapperInfo info; - if (ExtractWrappableInfo(tracer_->isolate_, js_object, wrapper_descriptor_, - &info)) { - wrapper_cache_.push_back(std::move(info)); - FlushWrapperCacheIfFull(); - } -} - -void LocalEmbedderHeapTracer::ProcessingScope::FlushWrapperCacheIfFull() { - DCHECK(!tracer_->cpp_heap_); - if (wrapper_cache_.size() == wrapper_cache_.capacity()) { - tracer_->remote_tracer_->RegisterV8References(std::move(wrapper_cache_)); - wrapper_cache_.clear(); - wrapper_cache_.reserve(kWrapperCacheSize); - } -} - -void LocalEmbedderHeapTracer::ProcessingScope::AddWrapperInfoForTesting( - WrapperInfo info) { - wrapper_cache_.push_back(info); - FlushWrapperCacheIfFull(); -} - void LocalEmbedderHeapTracer::StartIncrementalMarkingIfNeeded() { if (!v8_flags.global_gc_scheduling || !v8_flags.incremental_marking) return; @@ -179,33 +110,16 @@ void LocalEmbedderHeapTracer::EmbedderWriteBarrier(Heap* heap, JSObject 
js_object) { DCHECK(InUse()); DCHECK(js_object.MayHaveEmbedderFields()); - if (cpp_heap_) { - DCHECK_NOT_NULL(heap->mark_compact_collector()); - const EmbedderDataSlot type_slot(js_object, - wrapper_descriptor_.wrappable_type_index); - const EmbedderDataSlot instance_slot( - js_object, wrapper_descriptor_.wrappable_instance_index); - heap->mark_compact_collector() - ->local_marking_worklists() - ->cpp_marking_state() - ->MarkAndPush(type_slot, instance_slot); - return; - } - LocalEmbedderHeapTracer::ProcessingScope scope(this); - scope.TracePossibleWrapper(js_object); -} - -bool DefaultEmbedderRootsHandler::IsRoot( - const v8::TracedReference& handle) { - return !tracer_ || tracer_->IsRootForNonTracingGC(handle); -} - -void DefaultEmbedderRootsHandler::ResetRoot( - const v8::TracedReference& handle) { - // Resetting is only called when IsRoot() returns false which - // can only happen the EmbedderHeapTracer is set on API level. - DCHECK(tracer_); - tracer_->ResetHandleInNonTracingGC(handle); + DCHECK_NOT_NULL(heap->mark_compact_collector()); + auto descriptor = wrapper_descriptor(); + const EmbedderDataSlot type_slot(js_object, descriptor.wrappable_type_index); + const EmbedderDataSlot instance_slot(js_object, + descriptor.wrappable_instance_index); + heap->mark_compact_collector() + ->local_marking_worklists() + ->cpp_marking_state() + ->MarkAndPush(type_slot, instance_slot); + return; } END_ALLOW_USE_DEPRECATED() diff --git a/src/heap/embedder-tracing.h b/src/heap/embedder-tracing.h index 9843076258..6166eb0cee 100644 --- a/src/heap/embedder-tracing.h +++ b/src/heap/embedder-tracing.h @@ -8,8 +8,6 @@ #include #include "include/v8-cppgc.h" -#include "include/v8-embedder-heap.h" -#include "include/v8-traced-handle.h" #include "src/common/globals.h" #include "src/execution/isolate.h" #include "src/flags/flags.h" @@ -21,21 +19,6 @@ namespace internal { class Heap; class JSObject; -START_ALLOW_USE_DEPRECATED() - -class V8_EXPORT_PRIVATE DefaultEmbedderRootsHandler 
final - : public EmbedderRootsHandler { - public: - bool IsRoot(const v8::TracedReference& handle) final; - - void ResetRoot(const v8::TracedReference& handle) final; - - void SetTracer(EmbedderHeapTracer* tracer) { tracer_ = tracer; } - - private: - EmbedderHeapTracer* tracer_ = nullptr; -}; - class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final { public: enum class CollectionType : uint8_t { @@ -43,7 +26,6 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final { kMajor, }; using WrapperInfo = std::pair; - using WrapperCache = std::vector; // WrapperInfo is passed over the API. Use VerboseWrapperInfo to access pair // internals in a named way. See ProcessingScope::TracePossibleJSWrapper() @@ -63,25 +45,6 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final { const WrapperInfo& raw_info; }; - class V8_EXPORT_PRIVATE V8_NODISCARD ProcessingScope { - public: - explicit ProcessingScope(LocalEmbedderHeapTracer* tracer); - ~ProcessingScope(); - - void TracePossibleWrapper(JSObject js_object); - - void AddWrapperInfoForTesting(WrapperInfo info); - - private: - static constexpr size_t kWrapperCacheSize = 1000; - - void FlushWrapperCacheIfFull(); - - LocalEmbedderHeapTracer* const tracer_; - const WrapperDescriptor wrapper_descriptor_; - WrapperCache wrapper_cache_; - }; - static V8_INLINE bool ExtractWrappableInfo(Isolate*, JSObject, const WrapperDescriptor&, WrapperInfo*); @@ -92,23 +55,15 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final { explicit LocalEmbedderHeapTracer(Isolate* isolate) : isolate_(isolate) {} ~LocalEmbedderHeapTracer() { - if (remote_tracer_) remote_tracer_->v8_isolate_ = nullptr; // CppHeap is not detached from Isolate here. Detaching is done explicitly // on Isolate/Heap/CppHeap destruction. } - bool InUse() const { return cpp_heap_ || (remote_tracer_ != nullptr); } - // This method doesn't take CppHeap into account. 
- EmbedderHeapTracer* remote_tracer() const { - DCHECK_NULL(cpp_heap_); - return remote_tracer_; - } + bool InUse() const { return cpp_heap_; } - void SetRemoteTracer(EmbedderHeapTracer* tracer); void SetCppHeap(CppHeap* cpp_heap); - void PrepareForTrace(EmbedderHeapTracer::TraceFlags flags, - CollectionType type); - void TracePrologue(EmbedderHeapTracer::TraceFlags flags); + void PrepareForTrace(CollectionType type); + void TracePrologue(); void TraceEpilogue(); void EnterFinalPause(); bool Trace(double deadline); @@ -125,8 +80,7 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final { bool SupportsIncrementalEmbedderSteps() const { if (!InUse()) return false; - return cpp_heap_ ? v8_flags.cppheap_incremental_marking - : v8_flags.incremental_marking_wrappers; + return v8_flags.cppheap_incremental_marking; } void SetEmbedderWorklistEmpty(bool is_empty) { @@ -158,18 +112,9 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final { WrapperInfo ExtractWrapperInfo(Isolate* isolate, JSObject js_object); - void SetWrapperDescriptor(const WrapperDescriptor& wrapper_descriptor) { - DCHECK_NULL(cpp_heap_); - wrapper_descriptor_ = wrapper_descriptor; - } - void UpdateRemoteStats(size_t, double); - DefaultEmbedderRootsHandler& default_embedder_roots_handler() { - return default_embedder_roots_handler_; - } - - EmbedderHeapTracer::EmbedderStackState embedder_stack_state() const { + cppgc::EmbedderStackState embedder_stack_state() const { return embedder_stack_state_; } @@ -178,39 +123,21 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final { private: static constexpr size_t kEmbedderAllocatedThreshold = 128 * KB; - static constexpr WrapperDescriptor::InternalFieldIndex - kDefaultWrapperTypeEmbedderIndex = 0; - static constexpr WrapperDescriptor::InternalFieldIndex - kDefaultWrapperInstanceEmbedderIndex = 1; - - static constexpr WrapperDescriptor GetDefaultWrapperDescriptor() { - // The default descriptor assumes the indices that known embedders use. 
- return WrapperDescriptor(kDefaultWrapperTypeEmbedderIndex, - kDefaultWrapperInstanceEmbedderIndex, - WrapperDescriptor::kUnknownEmbedderId); - } - CppHeap* cpp_heap() { DCHECK_NOT_NULL(cpp_heap_); - DCHECK_NULL(remote_tracer_); DCHECK_IMPLIES(isolate_, cpp_heap_ == isolate_->heap()->cpp_heap()); return cpp_heap_; } WrapperDescriptor wrapper_descriptor() { - if (cpp_heap_) - return cpp_heap()->wrapper_descriptor(); - else - return wrapper_descriptor_; + return cpp_heap()->wrapper_descriptor(); } Isolate* const isolate_; - EmbedderHeapTracer* remote_tracer_ = nullptr; CppHeap* cpp_heap_ = nullptr; - DefaultEmbedderRootsHandler default_embedder_roots_handler_; - EmbedderHeapTracer::EmbedderStackState embedder_stack_state_ = - EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers; + cppgc::EmbedderStackState embedder_stack_state_ = + cppgc::EmbedderStackState::kMayContainHeapPointers; // Indicates whether the embedder worklist was observed empty on the main // thread. This is opportunistic as concurrent marking tasks may hold local // segments of potential embedder fields to move to the main thread. @@ -229,16 +156,9 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final { size_t allocated_size_limit_for_check = 0; } remote_stats_; - // Default descriptor only used when the embedder is using EmbedderHeapTracer. - // The value is overriden by CppHeap with values that the embedder provided - // upon initialization. - WrapperDescriptor wrapper_descriptor_ = GetDefaultWrapperDescriptor(); - friend class EmbedderStackStateScope; }; -END_ALLOW_USE_DEPRECATED() - } // namespace internal } // namespace v8 diff --git a/src/heap/gc-tracer.h b/src/heap/gc-tracer.h index 3bc12b6bab..e6d34cb367 100644 --- a/src/heap/gc-tracer.h +++ b/src/heap/gc-tracer.h @@ -347,7 +347,7 @@ class V8_EXPORT_PRIVATE GCTracer { double time_ms = 0) const; // Allocation throughput in the embedder in bytes/millisecond in the - // last time_ms milliseconds. 
Reported through v8::EmbedderHeapTracer. + // last time_ms milliseconds. // Returns 0 if no allocation events have been recorded. double EmbedderAllocationThroughputInBytesPerMillisecond( double time_ms = 0) const; @@ -368,7 +368,7 @@ class V8_EXPORT_PRIVATE GCTracer { double CurrentOldGenerationAllocationThroughputInBytesPerMillisecond() const; // Allocation throughput in the embedder in bytes/milliseconds in the last - // kThroughputTimeFrameMs seconds. Reported through v8::EmbedderHeapTracer. + // kThroughputTimeFrameMs seconds. // Returns 0 if no allocation events have been recorded. double CurrentEmbedderAllocationThroughputInBytesPerMillisecond() const; diff --git a/src/heap/heap.cc b/src/heap/heap.cc index f0ffe5f236..c971669f05 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -5612,8 +5612,6 @@ void Heap::SetUpSpaces(LinearAllocationArea& new_allocation_info, dead_object_stats_.reset(new ObjectStats(this)); } local_embedder_heap_tracer_.reset(new LocalEmbedderHeapTracer(isolate())); - embedder_roots_handler_ = - &local_embedder_heap_tracer()->default_embedder_roots_handler(); if (Heap::AllocationTrackerForDebugging::IsNeeded()) { allocation_tracker_for_debugging_ = std::make_unique(this); @@ -5789,30 +5787,6 @@ void Heap::NotifyOldGenerationExpansion(AllocationSpace space, } } -START_ALLOW_USE_DEPRECATED() - -void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) { - DCHECK_EQ(gc_state(), HeapState::NOT_IN_GC); - // Setting a tracer is only supported when CppHeap is not used. 
- DCHECK_IMPLIES(tracer, !cpp_heap_); - local_embedder_heap_tracer()->SetRemoteTracer(tracer); -} - -EmbedderHeapTracer* Heap::GetEmbedderHeapTracer() const { - return local_embedder_heap_tracer()->remote_tracer(); -} - -EmbedderHeapTracer::TraceFlags Heap::flags_for_embedder_tracer() const { - if (is_current_gc_forced()) { - return EmbedderHeapTracer::TraceFlags::kForced; - } else if (ShouldReduceMemory()) { - return EmbedderHeapTracer::TraceFlags::kReduceMemory; - } - return EmbedderHeapTracer::TraceFlags::kNoFlags; -} - -END_ALLOW_USE_DEPRECATED() - void Heap::SetEmbedderRootsHandler(EmbedderRootsHandler* handler) { embedder_roots_handler_ = handler; } @@ -5847,20 +5821,6 @@ void Heap::SetStackStart(void* stack_start) { return isolate_->thread_local_top()->stack_; } -void Heap::RegisterExternallyReferencedObject(Address* location) { - Object object = TracedHandles::Mark(location, TracedHandles::MarkMode::kAll); - if (!object.IsHeapObject()) { - // The embedder is not aware of whether numbers are materialized as heap - // objects are just passed around as Smis. - return; - } - HeapObject heap_object = HeapObject::cast(object); - DCHECK(IsValidHeapObject(this, heap_object)); - DCHECK(incremental_marking()->IsMarking() || - mark_compact_collector()->in_use()); - mark_compact_collector()->MarkExternallyReferencedObject(heap_object); -} - void Heap::StartTearDown() { // Finish any ongoing sweeping to avoid stray background tasks still accessing // the heap during teardown. 
diff --git a/src/heap/heap.h b/src/heap/heap.h index 39e6efb080..0df9e0ad15 100644 --- a/src/heap/heap.h +++ b/src/heap/heap.h @@ -1163,16 +1163,6 @@ class Heap { return local_embedder_heap_tracer_.get(); } - START_ALLOW_USE_DEPRECATED() - - V8_EXPORT_PRIVATE void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer); - EmbedderHeapTracer* GetEmbedderHeapTracer() const; - EmbedderHeapTracer::TraceFlags flags_for_embedder_tracer() const; - - END_ALLOW_USE_DEPRECATED() - - void RegisterExternallyReferencedObject(Address* location); - // =========================================================================== // Unified heap (C++) support. =============================================== // =========================================================================== diff --git a/src/heap/incremental-marking.cc b/src/heap/incremental-marking.cc index 2e1cf40605..8a796e2bf4 100644 --- a/src/heap/incremental-marking.cc +++ b/src/heap/incremental-marking.cc @@ -315,14 +315,13 @@ void IncrementalMarking::StartMarkingMajor() { isolate()->external_pointer_table().StartCompactingIfNeeded(); #endif // V8_COMPRESS_POINTERS - auto embedder_flags = heap_->flags_for_embedder_tracer(); { TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE); // PrepareForTrace should be called before visitor initialization in // StartMarking. It is only used with CppHeap. heap_->local_embedder_heap_tracer()->PrepareForTrace( - embedder_flags, LocalEmbedderHeapTracer::CollectionType::kMajor); + LocalEmbedderHeapTracer::CollectionType::kMajor); } major_collector_->StartMarking(); @@ -358,7 +357,7 @@ void IncrementalMarking::StartMarkingMajor() { // marking (including write barriers) is fully set up. 
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE); - heap_->local_embedder_heap_tracer()->TracePrologue(embedder_flags); + heap_->local_embedder_heap_tracer()->TracePrologue(); } heap_->InvokeIncrementalMarkingEpilogueCallbacks(); @@ -555,8 +554,6 @@ void IncrementalMarking::EmbedderStep(double expected_duration_ms, return; } - constexpr size_t kObjectsToProcessBeforeDeadlineCheck = 500; - TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_TRACING); LocalEmbedderHeapTracer* local_tracer = heap_->local_embedder_heap_tracer(); const double start = heap_->MonotonicallyIncreasingTimeInMs(); @@ -564,21 +561,6 @@ void IncrementalMarking::EmbedderStep(double expected_duration_ms, bool empty_worklist = true; if (local_marking_worklists()->PublishWrapper()) { DCHECK(local_marking_worklists()->IsWrapperEmpty()); - } else { - // Cannot directly publish wrapper objects. - LocalEmbedderHeapTracer::ProcessingScope scope(local_tracer); - HeapObject object; - size_t cnt = 0; - while (local_marking_worklists()->PopWrapper(&object)) { - scope.TracePossibleWrapper(JSObject::cast(object)); - if (++cnt == kObjectsToProcessBeforeDeadlineCheck) { - if (deadline <= heap_->MonotonicallyIncreasingTimeInMs()) { - empty_worklist = false; - break; - } - cnt = 0; - } - } } // |deadline - heap_->MonotonicallyIncreasingTimeInMs()| could be negative, // which means |local_tracer| won't do any actual tracing, so there is no diff --git a/src/heap/mark-compact-inl.h b/src/heap/mark-compact-inl.h index 6cef400d0e..271df8c1b9 100644 --- a/src/heap/mark-compact-inl.h +++ b/src/heap/mark-compact-inl.h @@ -51,16 +51,6 @@ void MinorMarkCompactCollector::MarkRootObject(HeapObject obj) { } } -void MarkCompactCollector::MarkExternallyReferencedObject(HeapObject obj) { - DCHECK(ReadOnlyHeap::Contains(obj) || heap()->Contains(obj)); - if (marking_state()->WhiteToGrey(obj)) { - local_marking_worklists()->Push(obj); - if (V8_UNLIKELY(v8_flags.track_retaining_path)) { - 
heap_->AddRetainingRoot(Root::kWrapperTracing, obj); - } - } -} - // static void MarkCompactCollector::RecordSlot(HeapObject object, ObjectSlot slot, HeapObject target) { diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc index 49cbaea44f..4f0493830c 100644 --- a/src/heap/mark-compact.cc +++ b/src/heap/mark-compact.cc @@ -910,13 +910,12 @@ void MarkCompactCollector::Prepare() { DCHECK(!heap_->memory_allocator()->unmapper()->IsRunning()); if (!heap()->incremental_marking()->IsMarking()) { - const auto embedder_flags = heap_->flags_for_embedder_tracer(); { TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE); // PrepareForTrace should be called before visitor initialization in // StartMarking. heap_->local_embedder_heap_tracer()->PrepareForTrace( - embedder_flags, LocalEmbedderHeapTracer::CollectionType::kMajor); + LocalEmbedderHeapTracer::CollectionType::kMajor); } StartCompaction(StartCompactionMode::kAtomic); StartMarking(); @@ -924,7 +923,7 @@ void MarkCompactCollector::Prepare() { TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE); // TracePrologue immediately starts marking which requires V8 worklists to // be set up. - heap_->local_embedder_heap_tracer()->TracePrologue(embedder_flags); + heap_->local_embedder_heap_tracer()->TracePrologue(); } #ifdef V8_COMPRESS_POINTERS heap_->isolate()->external_pointer_table().StartCompactingIfNeeded(); @@ -2112,8 +2111,9 @@ void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor) { if (!heap_->cpp_heap() && heap_->local_embedder_heap_tracer()->InUse()) { // Conservative global handle scanning is necessary for keeping - // v8::TracedReference alive from the stack. This is only needed when using - // `EmbedderHeapTracer` and not using `CppHeap`. + // v8::TracedReference alive from the stack. + // + // TODO(v8:v8:13207): Remove as this is not required when using `CppHeap`. 
auto& stack = heap()->stack(); if (heap_->local_embedder_heap_tracer()->embedder_stack_state() == cppgc::EmbedderStackState::kMayContainHeapPointers) { @@ -2551,14 +2551,6 @@ void MarkCompactCollector::PerformWrapperTracing() { TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_TRACING); if (local_marking_worklists()->PublishWrapper()) { DCHECK(local_marking_worklists()->IsWrapperEmpty()); - } else { - // Cannot directly publish wrapper objects. - LocalEmbedderHeapTracer::ProcessingScope scope( - heap_->local_embedder_heap_tracer()); - HeapObject object; - while (local_marking_worklists()->PopWrapper(&object)) { - scope.TracePossibleWrapper(JSObject::cast(object)); - } } heap_->local_embedder_heap_tracer()->Trace( std::numeric_limits::infinity()); @@ -5790,8 +5782,8 @@ void MinorMarkCompactCollector::SweepArrayBufferExtensions() { void MinorMarkCompactCollector::PerformWrapperTracing() { if (!heap_->local_embedder_heap_tracer()->InUse()) return; - // TODO(v8:13475): DCHECK instead of bailing out once EmbedderHeapTracer is - // removed. + // TODO(v8:v8:13207): DCHECK instead of bailing out as only CppHeap is + // supported. if (!local_marking_worklists()->PublishWrapper()) return; DCHECK_NOT_NULL(CppHeap::From(heap_->cpp_heap())); DCHECK(CppHeap::From(heap_->cpp_heap())->generational_gc_supported()); @@ -5986,21 +5978,20 @@ void MinorMarkCompactCollector::Prepare() { // Probably requires more. if (!heap()->incremental_marking()->IsMarking()) { - const auto embedder_flags = heap_->flags_for_embedder_tracer(); { TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_EMBEDDER_PROLOGUE); // PrepareForTrace should be called before visitor initialization in // StartMarking. 
heap_->local_embedder_heap_tracer()->PrepareForTrace( - embedder_flags, LocalEmbedderHeapTracer::CollectionType::kMinor); + LocalEmbedderHeapTracer::CollectionType::kMinor); } StartMarking(); { TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE); // TracePrologue immediately starts marking which requires V8 worklists to // be set up. - heap_->local_embedder_heap_tracer()->TracePrologue(embedder_flags); + heap_->local_embedder_heap_tracer()->TracePrologue(); } } diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h index 254a351758..166f134a04 100644 --- a/src/heap/mark-compact.h +++ b/src/heap/mark-compact.h @@ -452,9 +452,6 @@ class MarkCompactCollector final : public CollectorBase { explicit MarkCompactCollector(Heap* heap); ~MarkCompactCollector() final; - // Used by wrapper tracing. - V8_INLINE void MarkExternallyReferencedObject(HeapObject obj); - std::unique_ptr CreateRememberedSetUpdatingItem( MemoryChunk* chunk); diff --git a/test/unittests/BUILD.gn b/test/unittests/BUILD.gn index fe6fe5bc50..cd1e19f570 100644 --- a/test/unittests/BUILD.gn +++ b/test/unittests/BUILD.gn @@ -412,7 +412,6 @@ v8_source_set("unittests_sources") { "heap/cppgc-js/unified-heap-utils.cc", "heap/cppgc-js/unified-heap-utils.h", "heap/cppgc-js/young-unified-heap-unittest.cc", - "heap/embedder-tracing-unittest.cc", "heap/gc-idle-time-handler-unittest.cc", "heap/gc-tracer-unittest.cc", "heap/global-handles-unittest.cc", diff --git a/test/unittests/heap/embedder-tracing-unittest.cc b/test/unittests/heap/embedder-tracing-unittest.cc deleted file mode 100644 index d2a54aecc2..0000000000 --- a/test/unittests/heap/embedder-tracing-unittest.cc +++ /dev/null @@ -1,607 +0,0 @@ -// Copyright 2016 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#include "src/heap/embedder-tracing.h" - -#include "include/v8-embedder-heap.h" -#include "include/v8-function.h" -#include "include/v8-template.h" -#include "src/handles/global-handles.h" -#include "src/heap/gc-tracer.h" -#include "src/heap/heap.h" -#include "test/unittests/heap/heap-utils.h" -#include "test/unittests/test-utils.h" -#include "testing/gmock/include/gmock/gmock.h" -#include "testing/gtest/include/gtest/gtest.h" - -namespace v8 { -namespace internal { - -using LocalEmbedderHeapTracerWithIsolate = TestWithHeapInternals; - -namespace heap { - -using testing::StrictMock; -using testing::_; -using testing::Return; -using v8::EmbedderHeapTracer; -using v8::internal::LocalEmbedderHeapTracer; - -namespace { - -LocalEmbedderHeapTracer::WrapperInfo CreateWrapperInfo() { - return LocalEmbedderHeapTracer::WrapperInfo(nullptr, nullptr); -} - -} // namespace - -START_ALLOW_USE_DEPRECATED() -class MockEmbedderHeapTracer : public EmbedderHeapTracer { - public: - MOCK_METHOD(void, TracePrologue, (EmbedderHeapTracer::TraceFlags), - (override)); - MOCK_METHOD(void, TraceEpilogue, (EmbedderHeapTracer::TraceSummary*), - (override)); - MOCK_METHOD(void, EnterFinalPause, (EmbedderHeapTracer::EmbedderStackState), - (override)); - MOCK_METHOD(bool, IsTracingDone, (), (override)); - MOCK_METHOD(void, RegisterV8References, - ((const std::vector >&)), (override)); - MOCK_METHOD(bool, AdvanceTracing, (double deadline_in_ms), (override)); -}; - -END_ALLOW_USE_DEPRECATED() - -TEST(LocalEmbedderHeapTracer, InUse) { - MockEmbedderHeapTracer mock_remote_tracer; - LocalEmbedderHeapTracer local_tracer(nullptr); - local_tracer.SetRemoteTracer(&mock_remote_tracer); - EXPECT_TRUE(local_tracer.InUse()); -} - -TEST(LocalEmbedderHeapTracer, NoRemoteTracer) { - LocalEmbedderHeapTracer local_tracer(nullptr); - // We should be able to call all functions without a remote tracer being - // attached. 
- EXPECT_FALSE(local_tracer.InUse()); - local_tracer.TracePrologue(EmbedderHeapTracer::TraceFlags::kNoFlags); - local_tracer.EnterFinalPause(); - bool done = local_tracer.Trace(std::numeric_limits::infinity()); - EXPECT_TRUE(done); - local_tracer.TraceEpilogue(); -} - -TEST(LocalEmbedderHeapTracer, TracePrologueForwards) { - StrictMock remote_tracer; - LocalEmbedderHeapTracer local_tracer(nullptr); - local_tracer.SetRemoteTracer(&remote_tracer); - EXPECT_CALL(remote_tracer, TracePrologue(_)); - local_tracer.TracePrologue(EmbedderHeapTracer::TraceFlags::kNoFlags); -} - -TEST(LocalEmbedderHeapTracer, TracePrologueForwardsMemoryReducingFlag) { - StrictMock remote_tracer; - LocalEmbedderHeapTracer local_tracer(nullptr); - local_tracer.SetRemoteTracer(&remote_tracer); - EXPECT_CALL(remote_tracer, - TracePrologue(EmbedderHeapTracer::TraceFlags::kReduceMemory)); - local_tracer.TracePrologue(EmbedderHeapTracer::TraceFlags::kReduceMemory); -} - -TEST(LocalEmbedderHeapTracer, TraceEpilogueForwards) { - StrictMock remote_tracer; - LocalEmbedderHeapTracer local_tracer(nullptr); - local_tracer.SetRemoteTracer(&remote_tracer); - EXPECT_CALL(remote_tracer, TraceEpilogue(_)); - local_tracer.TraceEpilogue(); -} - -TEST(LocalEmbedderHeapTracer, EnterFinalPauseForwards) { - StrictMock remote_tracer; - LocalEmbedderHeapTracer local_tracer(nullptr); - local_tracer.SetRemoteTracer(&remote_tracer); - EXPECT_CALL(remote_tracer, EnterFinalPause(_)); - local_tracer.EnterFinalPause(); -} - -TEST(LocalEmbedderHeapTracer, IsRemoteTracingDoneForwards) { - StrictMock remote_tracer; - LocalEmbedderHeapTracer local_tracer(nullptr); - local_tracer.SetRemoteTracer(&remote_tracer); - EXPECT_CALL(remote_tracer, IsTracingDone()); - local_tracer.IsRemoteTracingDone(); -} - -TEST(LocalEmbedderHeapTracer, EnterFinalPauseDefaultStackStateUnkown) { - StrictMock remote_tracer; - LocalEmbedderHeapTracer local_tracer(nullptr); - local_tracer.SetRemoteTracer(&remote_tracer); - // The default stack state is 
expected to be unkown. - EXPECT_CALL( - remote_tracer, - EnterFinalPause( - EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers)); - local_tracer.EnterFinalPause(); -} - -TEST_F(LocalEmbedderHeapTracerWithIsolate, - EnterFinalPauseStackStateIsForwarded) { - StrictMock remote_tracer; - LocalEmbedderHeapTracer local_tracer(isolate()); - local_tracer.SetRemoteTracer(&remote_tracer); - EmbedderStackStateScope scope = - EmbedderStackStateScope::ExplicitScopeForTesting( - &local_tracer, - EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers); - EXPECT_CALL( - remote_tracer, - EnterFinalPause(EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers)); - local_tracer.EnterFinalPause(); -} - -TEST_F(LocalEmbedderHeapTracerWithIsolate, TemporaryEmbedderStackState) { - StrictMock remote_tracer; - LocalEmbedderHeapTracer local_tracer(isolate()); - local_tracer.SetRemoteTracer(&remote_tracer); - // Default is unknown, see above. - { - EmbedderStackStateScope scope = - EmbedderStackStateScope::ExplicitScopeForTesting( - &local_tracer, - EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers); - EXPECT_CALL(remote_tracer, - EnterFinalPause( - EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers)); - local_tracer.EnterFinalPause(); - } -} - -TEST_F(LocalEmbedderHeapTracerWithIsolate, - TemporaryEmbedderStackStateRestores) { - StrictMock remote_tracer; - LocalEmbedderHeapTracer local_tracer(isolate()); - local_tracer.SetRemoteTracer(&remote_tracer); - // Default is unknown, see above. 
- { - EmbedderStackStateScope scope = - EmbedderStackStateScope::ExplicitScopeForTesting( - &local_tracer, - EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers); - { - EmbedderStackStateScope nested_scope = - EmbedderStackStateScope::ExplicitScopeForTesting( - &local_tracer, - EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers); - EXPECT_CALL( - remote_tracer, - EnterFinalPause( - EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers)); - local_tracer.EnterFinalPause(); - } - EXPECT_CALL(remote_tracer, - EnterFinalPause( - EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers)); - local_tracer.EnterFinalPause(); - } -} - -TEST_F(LocalEmbedderHeapTracerWithIsolate, TraceEpilogueStackStateResets) { - StrictMock remote_tracer; - LocalEmbedderHeapTracer local_tracer(isolate()); - local_tracer.SetRemoteTracer(&remote_tracer); - EmbedderStackStateScope scope = - EmbedderStackStateScope::ExplicitScopeForTesting( - &local_tracer, - EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers); - EXPECT_CALL( - remote_tracer, - EnterFinalPause(EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers)); - local_tracer.EnterFinalPause(); - EXPECT_CALL(remote_tracer, TraceEpilogue(_)); - local_tracer.TraceEpilogue(); - EXPECT_CALL( - remote_tracer, - EnterFinalPause( - EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers)); - local_tracer.EnterFinalPause(); -} - -TEST(LocalEmbedderHeapTracer, IsRemoteTracingDoneIncludesRemote) { - StrictMock remote_tracer; - LocalEmbedderHeapTracer local_tracer(nullptr); - local_tracer.SetRemoteTracer(&remote_tracer); - EXPECT_CALL(remote_tracer, IsTracingDone()); - local_tracer.IsRemoteTracingDone(); -} - -TEST(LocalEmbedderHeapTracer, RegisterV8ReferencesWithRemoteTracer) { - StrictMock remote_tracer; - LocalEmbedderHeapTracer local_tracer(nullptr); - local_tracer.SetRemoteTracer(&remote_tracer); - { - LocalEmbedderHeapTracer::ProcessingScope scope(&local_tracer); - 
scope.AddWrapperInfoForTesting(CreateWrapperInfo()); - EXPECT_CALL(remote_tracer, RegisterV8References(_)); - } - EXPECT_CALL(remote_tracer, IsTracingDone()).WillOnce(Return(false)); - EXPECT_FALSE(local_tracer.IsRemoteTracingDone()); -} - -TEST_F(LocalEmbedderHeapTracerWithIsolate, SetRemoteTracerSetsIsolate) { - StrictMock remote_tracer; - LocalEmbedderHeapTracer local_tracer(isolate()); - local_tracer.SetRemoteTracer(&remote_tracer); - EXPECT_EQ(isolate(), reinterpret_cast(remote_tracer.isolate())); -} - -TEST_F(LocalEmbedderHeapTracerWithIsolate, DestructorClearsIsolate) { - StrictMock remote_tracer; - { - LocalEmbedderHeapTracer local_tracer(isolate()); - local_tracer.SetRemoteTracer(&remote_tracer); - EXPECT_EQ(isolate(), reinterpret_cast(remote_tracer.isolate())); - } - EXPECT_EQ(nullptr, remote_tracer.isolate()); -} - -namespace { - -v8::Local ConstructTraceableJSApiObject( - v8::Local context, void* first_field, void* second_field) { - v8::EscapableHandleScope scope(context->GetIsolate()); - v8::Local function_t = - v8::FunctionTemplate::New(context->GetIsolate()); - v8::Local instance_t = function_t->InstanceTemplate(); - instance_t->SetInternalFieldCount(2); - v8::Local function = - function_t->GetFunction(context).ToLocalChecked(); - v8::Local instance = - function->NewInstance(context).ToLocalChecked(); - instance->SetAlignedPointerInInternalField(0, first_field); - instance->SetAlignedPointerInInternalField(1, second_field); - EXPECT_FALSE(instance.IsEmpty()); - i::Handle js_obj = v8::Utils::OpenHandle(*instance); - EXPECT_EQ(i::JS_API_OBJECT_TYPE, js_obj->map().instance_type()); - return scope.Escape(instance); -} - -enum class TracePrologueBehavior { kNoop, kCallV8WriteBarrier }; - -START_ALLOW_USE_DEPRECATED() - -class TestEmbedderHeapTracer final : public v8::EmbedderHeapTracer { - public: - TestEmbedderHeapTracer() = default; - TestEmbedderHeapTracer(TracePrologueBehavior prologue_behavior, - v8::Global array) - : 
prologue_behavior_(prologue_behavior), array_(std::move(array)) {} - - void RegisterV8References( - const std::vector>& embedder_fields) final { - registered_from_v8_.insert(registered_from_v8_.end(), - embedder_fields.begin(), embedder_fields.end()); - } - - void AddReferenceForTracing(v8::TracedReference* ref) { - to_register_with_v8_references_.push_back(ref); - } - - bool AdvanceTracing(double deadline_in_ms) final { - for (auto ref : to_register_with_v8_references_) { - RegisterEmbedderReference(ref->As()); - } - to_register_with_v8_references_.clear(); - return true; - } - - bool IsTracingDone() final { return to_register_with_v8_references_.empty(); } - - void TracePrologue(EmbedderHeapTracer::TraceFlags) final { - if (prologue_behavior_ == TracePrologueBehavior::kCallV8WriteBarrier) { - auto local = array_.Get(isolate()); - local - ->Set(local->GetCreationContext().ToLocalChecked(), 0, - v8::Object::New(isolate())) - .Check(); - } - } - - void TraceEpilogue(TraceSummary*) final {} - void EnterFinalPause(EmbedderStackState) final {} - - bool IsRegisteredFromV8(void* first_field) const { - for (auto pair : registered_from_v8_) { - if (pair.first == first_field) return true; - } - return false; - } - - void DoNotConsiderAsRootForScavenge(v8::TracedReference* handle) { - handle->SetWrapperClassId(17); - non_root_handles_.push_back(handle); - } - - bool IsRootForNonTracingGC( - const v8::TracedReference& handle) final { - return handle.WrapperClassId() != 17; - } - - void ResetHandleInNonTracingGC( - const v8::TracedReference& handle) final { - for (auto* non_root_handle : non_root_handles_) { - if (*non_root_handle == handle) { - non_root_handle->Reset(); - } - } - } - - private: - std::vector> registered_from_v8_; - std::vector*> to_register_with_v8_references_; - TracePrologueBehavior prologue_behavior_ = TracePrologueBehavior::kNoop; - v8::Global array_; - std::vector*> non_root_handles_; -}; - -class V8_NODISCARD TemporaryEmbedderHeapTracerScope final { - 
public: - TemporaryEmbedderHeapTracerScope(v8::Isolate* isolate, - v8::EmbedderHeapTracer* tracer) - : isolate_(isolate) { - isolate_->SetEmbedderHeapTracer(tracer); - } - - ~TemporaryEmbedderHeapTracerScope() { - isolate_->SetEmbedderHeapTracer(nullptr); - } - - private: - v8::Isolate* const isolate_; -}; - -END_ALLOW_USE_DEPRECATED() - -} // namespace - -using EmbedderTracingTest = TestWithHeapInternalsAndContext; - -TEST_F(EmbedderTracingTest, V8RegisterEmbedderReference) { - // Tests that wrappers are properly registered with the embedder heap - // tracer. - ManualGCScope manual_gc(i_isolate()); - TestEmbedderHeapTracer tracer; - heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer); - v8::HandleScope scope(v8_isolate()); - v8::Local context = v8::Context::New(v8_isolate()); - v8::Context::Scope context_scope(context); - - void* first_and_second_field = reinterpret_cast(0x2); - v8::Local api_object = ConstructTraceableJSApiObject( - context, first_and_second_field, first_and_second_field); - ASSERT_FALSE(api_object.IsEmpty()); - CollectGarbage(i::OLD_SPACE); - EXPECT_TRUE(tracer.IsRegisteredFromV8(first_and_second_field)); -} - -TEST_F(EmbedderTracingTest, EmbedderRegisteringV8Reference) { - // Tests that references that are registered by the embedder heap tracer are - // considered live by V8. 
- ManualGCScope manual_gc(i_isolate()); - TestEmbedderHeapTracer tracer; - heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer); - v8::HandleScope scope(v8_isolate()); - v8::Local context = v8::Context::New(v8_isolate()); - v8::Context::Scope context_scope(context); - - auto handle = std::make_unique>(); - { - v8::HandleScope inner_scope(v8_isolate()); - v8::Local o = - v8::Local::New(v8_isolate(), v8::Object::New(v8_isolate())); - handle->Reset(v8_isolate(), o); - } - tracer.AddReferenceForTracing(handle.get()); - CollectGarbage(i::OLD_SPACE); - EXPECT_FALSE(handle->IsEmpty()); -} - -TEST_F(EmbedderTracingTest, FinalizeTracingIsNoopWhenNotMarking) { - ManualGCScope manual_gc(i_isolate()); - TestEmbedderHeapTracer tracer; - heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer); - - // Finalize a potentially running garbage collection. - CollectGarbage(OLD_SPACE); - EXPECT_TRUE(i_isolate()->heap()->incremental_marking()->IsStopped()); - - int gc_counter = i_isolate()->heap()->gc_count(); - tracer.FinalizeTracing(); - EXPECT_TRUE(i_isolate()->heap()->incremental_marking()->IsStopped()); - EXPECT_EQ(gc_counter, i_isolate()->heap()->gc_count()); -} - -TEST_F(EmbedderTracingTest, FinalizeTracingWhenMarking) { - if (!v8_flags.incremental_marking) return; - ManualGCScope manual_gc(i_isolate()); - Heap* heap = i_isolate()->heap(); - TestEmbedderHeapTracer tracer; - heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer); - - // Finalize a potentially running garbage collection. 
- CollectGarbage(OLD_SPACE); - if (heap->sweeping_in_progress()) { - heap->EnsureSweepingCompleted( - Heap::SweepingForcedFinalizationMode::kV8Only); - } - heap->tracer()->StopFullCycleIfNeeded(); - EXPECT_TRUE(heap->incremental_marking()->IsStopped()); - - i::IncrementalMarking* marking = heap->incremental_marking(); - { - IsolateSafepointScope scope(heap); - heap->tracer()->StartCycle( - GarbageCollector::MARK_COMPACTOR, GarbageCollectionReason::kTesting, - "collector cctest", GCTracer::MarkingType::kIncremental); - marking->Start(GarbageCollector::MARK_COMPACTOR, - GarbageCollectionReason::kTesting); - } - - // Sweeping is not runing so we should immediately start marking. - EXPECT_TRUE(marking->IsMarking()); - tracer.FinalizeTracing(); - EXPECT_TRUE(marking->IsStopped()); -} - -namespace { - -void ConstructJSObject(v8::Isolate* isolate, v8::Local context, - v8::TracedReference* handle) { - v8::HandleScope scope(isolate); - v8::Local object(v8::Object::New(isolate)); - EXPECT_FALSE(object.IsEmpty()); - *handle = v8::TracedReference(isolate, object); - EXPECT_FALSE(handle->IsEmpty()); -} - -} // namespace - -TEST_F(EmbedderTracingTest, TracedReferenceHandlesMarking) { - ManualGCScope manual_gc(i_isolate()); - v8::HandleScope scope(v8_isolate()); - auto live = std::make_unique>(); - auto dead = std::make_unique>(); - live->Reset(v8_isolate(), v8::Undefined(v8_isolate())); - dead->Reset(v8_isolate(), v8::Undefined(v8_isolate())); - auto* traced_handles = i_isolate()->traced_handles(); - { - TestEmbedderHeapTracer tracer; - heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer); - tracer.AddReferenceForTracing(live.get()); - const size_t initial_count = traced_handles->used_node_count(); - { - // Conservative scanning may find stale pointers to on-stack handles. - // Disable scanning, assuming the slots are overwritten. 
- DisableConservativeStackScanningScopeForTesting no_stack_scanning( - i_isolate()->heap()); - EmbedderStackStateScope scope = - EmbedderStackStateScope::ExplicitScopeForTesting( - reinterpret_cast(v8_isolate()) - ->heap() - ->local_embedder_heap_tracer(), - EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers); - FullGC(); - } - const size_t final_count = traced_handles->used_node_count(); - // Handles are not black allocated, so `dead` is immediately reclaimed. - EXPECT_EQ(initial_count, final_count + 1); - } -} - -namespace { - -START_ALLOW_USE_DEPRECATED() - -class TracedReferenceVisitor final - : public v8::EmbedderHeapTracer::TracedGlobalHandleVisitor { - public: - ~TracedReferenceVisitor() override = default; - - void VisitTracedReference(const TracedReference& value) final { - if (value.WrapperClassId() == 57) { - count_++; - } - } - - size_t count() const { return count_; } - - private: - size_t count_ = 0; -}; - -END_ALLOW_USE_DEPRECATED() - -} // namespace - -TEST_F(EmbedderTracingTest, TracedReferenceIteration) { - ManualGCScope manual_gc(i_isolate()); - v8::HandleScope scope(v8_isolate()); - TestEmbedderHeapTracer tracer; - heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer); - - auto handle = std::make_unique>(); - ConstructJSObject(v8_isolate(), v8_isolate()->GetCurrentContext(), - handle.get()); - EXPECT_FALSE(handle->IsEmpty()); - handle->SetWrapperClassId(57); - TracedReferenceVisitor visitor; - { - v8::HandleScope new_scope(v8_isolate()); - tracer.IterateTracedGlobalHandles(&visitor); - } - EXPECT_EQ(1u, visitor.count()); -} - -TEST_F(EmbedderTracingTest, TracePrologueCallingIntoV8WriteBarrier) { - // Regression test: https://crbug.com/940003 - if (!v8_flags.incremental_marking) return; - ManualGCScope manual_gc(isolate()); - v8::HandleScope scope(v8_isolate()); - v8::Global global; - { - v8::HandleScope new_scope(v8_isolate()); - auto local = v8::Array::New(v8_isolate(), 10); - global.Reset(v8_isolate(), local); - } - 
TestEmbedderHeapTracer tracer(TracePrologueBehavior::kCallV8WriteBarrier, - std::move(global)); - TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer); - SimulateIncrementalMarking(); - // Finish GC to avoid removing the tracer while GC is running which may end up - // in an infinite loop because of unprocessed objects. - FullGC(); -} - -TEST_F(EmbedderTracingTest, BasicTracedReference) { - ManualGCScope manual_gc(i_isolate()); - v8::HandleScope scope(v8_isolate()); - TestEmbedderHeapTracer tracer; - heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer); - tracer.SetStackStart( - static_cast(base::Stack::GetCurrentFrameAddress())); - auto* traced_handles = i_isolate()->traced_handles(); - - const size_t initial_count = traced_handles->used_node_count(); - char* memory = new char[sizeof(v8::TracedReference)]; - auto* traced = new (memory) v8::TracedReference(); - { - v8::HandleScope new_scope(v8_isolate()); - v8::Local object(ConstructTraceableJSApiObject( - v8_isolate()->GetCurrentContext(), nullptr, nullptr)); - EXPECT_TRUE(traced->IsEmpty()); - *traced = v8::TracedReference(v8_isolate(), object); - EXPECT_FALSE(traced->IsEmpty()); - EXPECT_EQ(initial_count + 1, traced_handles->used_node_count()); - } - traced->~TracedReference(); - EXPECT_EQ(initial_count + 1, traced_handles->used_node_count()); - { - // Conservative scanning may find stale pointers to on-stack handles. - // Disable scanning, assuming the slots are overwritten. 
- DisableConservativeStackScanningScopeForTesting no_stack_scanning( - i_isolate()->heap()); - EmbedderStackStateScope scope = - EmbedderStackStateScope::ExplicitScopeForTesting( - reinterpret_cast(v8_isolate()) - ->heap() - ->local_embedder_heap_tracer(), - EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers); - FullGC(); - } - EXPECT_EQ(initial_count, traced_handles->used_node_count()); - delete[] memory; -} - -} // namespace heap -} // namespace internal -} // namespace v8 diff --git a/test/unittests/heap/heap-utils.h b/test/unittests/heap/heap-utils.h index 15fbb9ca91..633652617d 100644 --- a/test/unittests/heap/heap-utils.h +++ b/test/unittests/heap/heap-utils.h @@ -97,26 +97,6 @@ class WithHeapInternals : public TMixin, HeapInternalsBase { } }; -START_ALLOW_USE_DEPRECATED() - -class V8_NODISCARD TemporaryEmbedderHeapTracerScope { - public: - TemporaryEmbedderHeapTracerScope(v8::Isolate* isolate, - v8::EmbedderHeapTracer* tracer) - : isolate_(isolate) { - isolate_->SetEmbedderHeapTracer(tracer); - } - - ~TemporaryEmbedderHeapTracerScope() { - isolate_->SetEmbedderHeapTracer(nullptr); - } - - private: - v8::Isolate* const isolate_; -}; - -END_ALLOW_USE_DEPRECATED() - using TestWithHeapInternals = // WithHeapInternals< // WithInternalIsolateMixin< // From f941f98d4e74bd13a58c0548bb1c2c1deef7f635 Mon Sep 17 00:00:00 2001 From: Clemens Backes Date: Fri, 23 Dec 2022 17:30:03 +0100 Subject: [PATCH 111/654] [liftoff] Compute instance register more directly Just take the first GP parameter register; this is more efficient than going through the call descriptor. 
R=ahaas@chromium.org Bug: v8:13565 Change-Id: If0c6988c359511c07c5f41b7fa79e3e55d3d81c9 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4111934 Reviewed-by: Andreas Haas Commit-Queue: Clemens Backes Cr-Commit-Position: refs/heads/main@{#85062} --- src/wasm/baseline/liftoff-assembler.cc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/wasm/baseline/liftoff-assembler.cc b/src/wasm/baseline/liftoff-assembler.cc index ae2fdf921f..3d883853a5 100644 --- a/src/wasm/baseline/liftoff-assembler.cc +++ b/src/wasm/baseline/liftoff-assembler.cc @@ -1036,8 +1036,6 @@ void LiftoffAssembler::PrepareCall(const ValueKindSig* sig, Register* target, Register* target_instance) { uint32_t num_params = static_cast(sig->parameter_count()); - // Input 0 is the call target. - constexpr size_t kInputShift = 1; // Spill all cache slots which are not being used as parameters. cache_state_.ClearAllCacheRegisters(); @@ -1056,10 +1054,12 @@ void LiftoffAssembler::PrepareCall(const ValueKindSig* sig, LiftoffRegList param_regs; // Move the target instance (if supplied) into the correct instance register. - compiler::LinkageLocation instance_loc = - call_descriptor->GetInputLocation(kInputShift); - DCHECK(instance_loc.IsRegister() && !instance_loc.IsAnyRegister()); - Register instance_reg = Register::from_code(instance_loc.AsRegister()); + Register instance_reg = wasm::kGpParamRegisters[0]; + // Check that the call descriptor agrees. Input 0 is the call target, 1 is the + // instance. 
+ DCHECK_EQ( + instance_reg, + Register::from_code(call_descriptor->GetInputLocation(1).AsRegister())); param_regs.set(instance_reg); if (target_instance && *target_instance != instance_reg) { stack_transfers.MoveRegister(LiftoffRegister(instance_reg), From 9b9063dd22e050e694fd5ba3d5c19dddcb159310 Mon Sep 17 00:00:00 2001 From: pthier Date: Mon, 2 Jan 2023 15:46:18 +0100 Subject: [PATCH 112/654] [maglev][arm64] Port ToObject, ToString and ConvertReceiver Bug: v8:7700 Change-Id: I74465260ee7a1af69bdf17c5d02d897a30c7866a Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4129531 Reviewed-by: Victor Gomes Commit-Queue: Patrick Thier Cr-Commit-Position: refs/heads/main@{#85063} --- src/maglev/arm64/maglev-ir-arm64.cc | 114 +++++++++++++++++++++++++++- 1 file changed, 111 insertions(+), 3 deletions(-) diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index fad9c0d886..8f0b305c56 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -118,7 +118,48 @@ void Int32DecrementWithOverflow::GenerateCode(MaglevAssembler* masm, __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this); } -UNIMPLEMENTED_NODE_WITH_CALL(ConvertReceiver, mode_) +int ConvertReceiver::MaxCallStackArgs() const { + using D = CallInterfaceDescriptorFor::type; + return D::GetStackParameterCount(); +} +void ConvertReceiver::SetValueLocationConstraints() { + using D = CallInterfaceDescriptorFor::type; + UseFixed(receiver_input(), D::GetRegisterParameter(D::kInput)); + DefineAsFixed(this, kReturnRegister0); +} +void ConvertReceiver::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Label convert_to_object, done; + Register receiver = ToRegister(receiver_input()); + __ JumpIfSmi(receiver, &convert_to_object); + static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE); + { + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + __ JumpIfObjectType(receiver, scratch, scratch, 
FIRST_JS_RECEIVER_TYPE, + &done, hs); + } + + if (mode_ != ConvertReceiverMode::kNotNullOrUndefined) { + Label convert_global_proxy; + __ JumpIfRoot(receiver, RootIndex::kUndefinedValue, &convert_global_proxy); + __ JumpIfNotRoot(receiver, RootIndex::kNullValue, &convert_to_object); + __ bind(&convert_global_proxy); + { + // Patch receiver to global proxy. + __ Move(ToRegister(result()), + target_.native_context().global_proxy_object().object()); + } + __ jmp(&done); + } + + __ bind(&convert_to_object); + // ToObject needs to be ran with the target context installed. + __ Move(kContextRegister, target_.context().object()); + __ CallBuiltin(Builtin::kToObject); + __ bind(&done); +} + UNIMPLEMENTED_NODE(LoadSignedIntDataViewElement, type_) UNIMPLEMENTED_NODE(LoadDoubleDataViewElement) UNIMPLEMENTED_NODE(LoadSignedIntTypedArrayElement, elements_kind_) @@ -126,8 +167,75 @@ UNIMPLEMENTED_NODE(LoadUnsignedIntTypedArrayElement, elements_kind_) UNIMPLEMENTED_NODE(LoadDoubleTypedArrayElement, elements_kind_) UNIMPLEMENTED_NODE(HoleyFloat64Box) UNIMPLEMENTED_NODE(SetPendingMessage) -UNIMPLEMENTED_NODE_WITH_CALL(ToObject) -UNIMPLEMENTED_NODE_WITH_CALL(ToString) + +int ToObject::MaxCallStackArgs() const { + using D = CallInterfaceDescriptorFor::type; + return D::GetStackParameterCount(); +} +void ToObject::SetValueLocationConstraints() { + using D = CallInterfaceDescriptorFor::type; + UseFixed(context(), kContextRegister); + UseFixed(value_input(), D::GetRegisterParameter(D::kInput)); + DefineAsFixed(this, kReturnRegister0); +} +void ToObject::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { +#ifdef DEBUG + using D = CallInterfaceDescriptorFor::type; + DCHECK_EQ(ToRegister(context()), kContextRegister); + DCHECK_EQ(ToRegister(value_input()), D::GetRegisterParameter(D::kInput)); +#endif // DEBUG + Register value = ToRegister(value_input()); + Label call_builtin, done; + // Avoid the builtin call if {value} is a JSReceiver. 
+ __ JumpIfSmi(value, &call_builtin); + { + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + __ LoadMap(scratch, value); + __ CompareInstanceType(scratch, scratch.W(), FIRST_JS_RECEIVER_TYPE); + __ B(&done, hs); + } + __ bind(&call_builtin); + __ CallBuiltin(Builtin::kToObject); + masm->DefineExceptionHandlerAndLazyDeoptPoint(this); + __ bind(&done); +} + +int ToString::MaxCallStackArgs() const { + using D = CallInterfaceDescriptorFor::type; + return D::GetStackParameterCount(); +} +void ToString::SetValueLocationConstraints() { + using D = CallInterfaceDescriptorFor::type; + UseFixed(context(), kContextRegister); + UseFixed(value_input(), D::GetRegisterParameter(D::kO)); + DefineAsFixed(this, kReturnRegister0); +} +void ToString::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { +#ifdef DEBUG + using D = CallInterfaceDescriptorFor::type; + DCHECK_EQ(ToRegister(context()), kContextRegister); + DCHECK_EQ(ToRegister(value_input()), D::GetRegisterParameter(D::kO)); +#endif // DEBUG + Register value = ToRegister(value_input()); + Label call_builtin, done; + // Avoid the builtin call if {value} is a string. + __ JumpIfSmi(value, &call_builtin); + { + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + __ LoadMap(scratch, value); + __ CompareInstanceType(scratch, scratch.W(), FIRST_NONSTRING_TYPE); + __ B(&done, lo); + } + __ bind(&call_builtin); + __ CallBuiltin(Builtin::kToString); + masm->DefineExceptionHandlerAndLazyDeoptPoint(this); + __ bind(&done); +} + UNIMPLEMENTED_NODE(AssertInt32, condition_, reason_) UNIMPLEMENTED_NODE(CheckUint32IsSmi) UNIMPLEMENTED_NODE(CheckJSArrayBounds) From 03ad044ce58b73d206032ddfdc2e259c17555a2e Mon Sep 17 00:00:00 2001 From: Clemens Backes Date: Mon, 2 Jan 2023 16:13:14 +0100 Subject: [PATCH 113/654] [fuzzer][liftoff] Account for locals in max_steps The fuzzer limits the number of "steps" that should be executed in Liftoff. 
A "step" typically means one Wasm instruction. The cost of function calls is linear in the number of parameters and locals though, so that should be accounted for. In the linked issue (timeout), we were repeatedly calling a function with a big number of reference locals, which all need to be initialized to the null value. R=thibaudm@chromium.org Bug: chromium:1399868 Change-Id: Id071aeee6a0b2670b926880744ea82cc37881876 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4118547 Reviewed-by: Thibaud Michaud Commit-Queue: Clemens Backes Cr-Commit-Position: refs/heads/main@{#85064} --- src/wasm/baseline/liftoff-compiler.cc | 53 ++++++++++++++++----------- 1 file changed, 32 insertions(+), 21 deletions(-) diff --git a/src/wasm/baseline/liftoff-compiler.cc b/src/wasm/baseline/liftoff-compiler.cc index f1894df85d..a9b9a64864 100644 --- a/src/wasm/baseline/liftoff-compiler.cc +++ b/src/wasm/baseline/liftoff-compiler.cc @@ -901,6 +901,12 @@ class LiftoffCompiler { if (for_debugging_) __ ResetOSRTarget(); + if (V8_UNLIKELY(max_steps_)) { + // Subtract 16 steps for the function call itself (including the function + // prologue), plus 1 for each local (including parameters). 
+ CheckMaxSteps(decoder, 16 + __ num_locals()); + } + if (num_params) { CODE_COMMENT("process parameters"); ParameterProcessor processor(this, num_params); @@ -1099,6 +1105,31 @@ class LiftoffCompiler { asm_.AbortCompilation(); } + void CheckMaxSteps(FullDecoder* decoder, int steps_done = 1) { + DCHECK_LE(1, steps_done); + CODE_COMMENT("check max steps"); + LiftoffRegList pinned; + LiftoffRegister max_steps = pinned.set(__ GetUnusedRegister(kGpReg, {})); + LiftoffRegister max_steps_addr = + pinned.set(__ GetUnusedRegister(kGpReg, pinned)); + { + FREEZE_STATE(frozen); + __ LoadConstant( + max_steps_addr, + WasmValue::ForUintPtr(reinterpret_cast(max_steps_))); + __ Load(max_steps, max_steps_addr.gp(), no_reg, 0, LoadType::kI32Load); + Label cont; + __ emit_i32_cond_jumpi(kSignedGreaterEqual, &cont, max_steps.gp(), + steps_done, frozen); + // Abort. + Trap(decoder, kTrapUnreachable); + __ bind(&cont); + } + __ emit_i32_subi(max_steps.gp(), max_steps.gp(), steps_done); + __ Store(max_steps_addr.gp(), no_reg, 0, max_steps, StoreType::kI32Store, + pinned); + } + V8_NOINLINE void EmitDebuggingInfo(FullDecoder* decoder, WasmOpcode opcode) { DCHECK(for_debugging_); if (!WasmOpcodes::IsBreakable(opcode)) return; @@ -1162,27 +1193,7 @@ class LiftoffCompiler { __ bind(&cont); } if (V8_UNLIKELY(max_steps_ != nullptr)) { - CODE_COMMENT("check max steps"); - LiftoffRegList pinned; - LiftoffRegister max_steps = __ GetUnusedRegister(kGpReg, {}); - pinned.set(max_steps); - LiftoffRegister max_steps_addr = __ GetUnusedRegister(kGpReg, pinned); - pinned.set(max_steps_addr); - { - FREEZE_STATE(frozen); - __ LoadConstant( - max_steps_addr, - WasmValue::ForUintPtr(reinterpret_cast(max_steps_))); - __ Load(max_steps, max_steps_addr.gp(), no_reg, 0, LoadType::kI32Load); - Label cont; - __ emit_i32_cond_jumpi(kUnequal, &cont, max_steps.gp(), 0, frozen); - // Abort. 
- Trap(decoder, kTrapUnreachable); - __ bind(&cont); - } - __ emit_i32_subi(max_steps.gp(), max_steps.gp(), 1); - __ Store(max_steps_addr.gp(), no_reg, 0, max_steps, StoreType::kI32Store, - pinned); + CheckMaxSteps(decoder); } } From ba8eec7da00e67d41cce279ecbb940bf8210ab37 Mon Sep 17 00:00:00 2001 From: Clemens Backes Date: Mon, 2 Jan 2023 14:42:49 +0100 Subject: [PATCH 114/654] [wasm] Report decommit failure as OOM If we are near OOM, the {DecommitPages} call can actually fail. Call {FatalProcessOutOfMemory} in that case to get a proper OOM crash signature. To protect against bugs in the implementation, we add a check that decommitting only fails with the ENOMEM error. R=mlippautz@chromium.org Bug: chromium:1403519 Change-Id: I54fabd1efa566cc1c474974577ec16f75cd3d726 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4118548 Reviewed-by: Michael Lippautz Commit-Queue: Clemens Backes Cr-Commit-Position: refs/heads/main@{#85065} --- src/base/platform/platform-posix.cc | 9 +++++++-- src/wasm/wasm-code-manager.cc | 10 ++++++++-- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/src/base/platform/platform-posix.cc b/src/base/platform/platform-posix.cc index 5e356c521a..43e3268d11 100644 --- a/src/base/platform/platform-posix.cc +++ b/src/base/platform/platform-posix.cc @@ -582,9 +582,14 @@ bool OS::DecommitPages(void* address, size_t size) { // shall be removed, as if by an appropriate call to munmap(), before the new // mapping is established." As a consequence, the memory will be // zero-initialized on next access. 
- void* ptr = mmap(address, size, PROT_NONE, + void* ret = mmap(address, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); - return ptr == address; + if (V8_UNLIKELY(ret == MAP_FAILED)) { + CHECK_EQ(ENOMEM, errno); + return false; + } + CHECK_EQ(ret, address); + return true; } #endif // !defined(_AIX) diff --git a/src/wasm/wasm-code-manager.cc b/src/wasm/wasm-code-manager.cc index 55670172aa..015f76929a 100644 --- a/src/wasm/wasm-code-manager.cc +++ b/src/wasm/wasm-code-manager.cc @@ -2010,8 +2010,14 @@ void WasmCodeManager::Decommit(base::AddressRegion region) { USE(old_committed); TRACE_HEAP("Decommitting system pages 0x%" PRIxPTR ":0x%" PRIxPTR "\n", region.begin(), region.end()); - CHECK(allocator->DecommitPages(reinterpret_cast(region.begin()), - region.size())); + if (V8_UNLIKELY(!allocator->DecommitPages( + reinterpret_cast(region.begin()), region.size()))) { + // Decommit can fail in near-OOM situations. + auto oom_detail = base::FormattedString{} << "region size: " + << region.size(); + V8::FatalProcessOutOfMemory(nullptr, "Decommit Wasm code space", + oom_detail.PrintToArray().data()); + } } void WasmCodeManager::AssignRange(base::AddressRegion region, From 15c726bd630d94499ccc5b1ed39c0f24160cbf25 Mon Sep 17 00:00:00 2001 From: Nikolaos Papaspyrou Date: Mon, 2 Jan 2023 16:58:27 +0100 Subject: [PATCH 115/654] [heap] Merge mechanisms for disabling CSS EmbedderStackStateScope is used to disable conservative stack scanning for cppgc when the stack is known to not contain heap pointers. Also, DisableConservativeStackScanningScopeForTesting is used to disable CSS for the V8 heap in tests that assume a precise GC. Until now, these two have used two different mechanisms for disabling CSS. This CL merges the two mechanisms and implements the latter scope via the former. 
This is a reland of commit f51e0bb1db67cfa1b4ac11b13e5cbee0b8601149 reviewed on https://chromium-review.googlesource.com/c/v8/v8/+/4111954 Bug: v8:13257 Change-Id: Ia124a4201686e0ea79f9cd07bc3888b9781cafa9 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128141 Reviewed-by: Michael Lippautz Commit-Queue: Nikolaos Papaspyrou Cr-Commit-Position: refs/heads/main@{#85066} --- src/heap/heap.cc | 9 +++--- src/heap/heap.h | 29 +++++++------------ src/heap/mark-compact.cc | 3 +- .../heap/cppgc-js/unified-heap-unittest.cc | 22 +++++++++----- 4 files changed, 30 insertions(+), 33 deletions(-) diff --git a/src/heap/heap.cc b/src/heap/heap.cc index c971669f05..393aa02165 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -4869,11 +4869,10 @@ void Heap::IterateStackRoots(RootVisitor* v, StackState stack_state) { isolate_->Iterate(v); #ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING - if (stack_state == StackState::kMayContainHeapPointers && - !disable_conservative_stack_scanning_for_testing_) { - ConservativeStackVisitor stack_visitor(isolate(), v); - stack().IteratePointers(&stack_visitor); - } + if (stack_state == StackState::kNoHeapPointers || !IsGCWithStack()) return; + + ConservativeStackVisitor stack_visitor(isolate_, v); + stack().IteratePointers(&stack_visitor); #endif // V8_ENABLE_CONSERVATIVE_STACK_SCANNING } diff --git a/src/heap/heap.h b/src/heap/heap.h index 0df9e0ad15..a46186041c 100644 --- a/src/heap/heap.h +++ b/src/heap/heap.h @@ -2393,7 +2393,6 @@ class Heap { bool force_oom_ = false; bool force_gc_on_next_allocation_ = false; bool delay_sweeper_tasks_for_testing_ = false; - bool disable_conservative_stack_scanning_for_testing_ = false; UnorderedHeapObjectMap retainer_; UnorderedHeapObjectMap retaining_root_; @@ -2670,23 +2669,6 @@ class V8_EXPORT_PRIVATE V8_NODISCARD SaveStackContextScope { ::heap::base::Stack* stack_; }; -class V8_NODISCARD DisableConservativeStackScanningScopeForTesting { - public: - explicit inline 
DisableConservativeStackScanningScopeForTesting(Heap* heap) - : heap_(heap), - old_value_(heap_->disable_conservative_stack_scanning_for_testing_) { - heap_->disable_conservative_stack_scanning_for_testing_ = true; - } - - inline ~DisableConservativeStackScanningScopeForTesting() { - heap_->disable_conservative_stack_scanning_for_testing_ = old_value_; - } - - protected: - Heap* heap_; - bool old_value_; -}; - // Space iterator for iterating over all the paged spaces of the heap: Map // space, old space and code space. Returns each space in turn, and null when it // is done. @@ -2842,6 +2824,17 @@ class V8_EXPORT_PRIVATE V8_NODISCARD EmbedderStackStateScope final { const StackState old_stack_state_; }; +class V8_NODISCARD DisableConservativeStackScanningScopeForTesting { + public: + explicit inline DisableConservativeStackScanningScopeForTesting(Heap* heap) + : embedder_scope_(EmbedderStackStateScope::ExplicitScopeForTesting( + heap->local_embedder_heap_tracer(), + cppgc::EmbedderStackState::kNoHeapPointers)) {} + + private: + EmbedderStackStateScope embedder_scope_; +}; + class V8_NODISCARD CppClassNamesAsHeapObjectNameScope final { public: explicit CppClassNamesAsHeapObjectNameScope(v8::CppHeap* heap); diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc index 4f0493830c..c39bbbbd85 100644 --- a/src/heap/mark-compact.cc +++ b/src/heap/mark-compact.cc @@ -2115,8 +2115,7 @@ void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor) { // // TODO(v8:v8:13207): Remove as this is not required when using `CppHeap`. 
auto& stack = heap()->stack(); - if (heap_->local_embedder_heap_tracer()->embedder_stack_state() == - cppgc::EmbedderStackState::kMayContainHeapPointers) { + if (heap_->IsGCWithStack()) { ConservativeTracedHandlesMarkingVisitor conservative_marker( *heap_, *local_marking_worklists_, cppgc::internal::CollectionType::kMajor); diff --git a/test/unittests/heap/cppgc-js/unified-heap-unittest.cc b/test/unittests/heap/cppgc-js/unified-heap-unittest.cc index 3439542e3b..3934eb8b00 100644 --- a/test/unittests/heap/cppgc-js/unified-heap-unittest.cc +++ b/test/unittests/heap/cppgc-js/unified-heap-unittest.cc @@ -517,12 +517,6 @@ V8_NOINLINE void StackToHeapTest(v8::Isolate* v8_isolate, Operation op, // Disable scanning, assuming the slots are overwritten. DisableConservativeStackScanningScopeForTesting no_stack_scanning( reinterpret_cast(v8_isolate)->heap()); - EmbedderStackStateScope scope = - EmbedderStackStateScope::ExplicitScopeForTesting( - reinterpret_cast(v8_isolate) - ->heap() - ->local_embedder_heap_tracer(), - cppgc::EmbedderStackState::kNoHeapPointers); FullGC(v8_isolate); } ASSERT_TRUE(observer.IsEmpty()); @@ -565,7 +559,13 @@ V8_NOINLINE void HeapToStackTest(v8::Isolate* v8_isolate, Operation op, FullGC(v8_isolate); EXPECT_FALSE(observer.IsEmpty()); stack_handle.Reset(); - FullGC(v8_isolate); + { + // Conservative scanning may find stale pointers to on-stack handles. + // Disable scanning, assuming the slots are overwritten. + DisableConservativeStackScanningScopeForTesting no_stack_scanning( + reinterpret_cast(v8_isolate)->heap()); + FullGC(v8_isolate); + } EXPECT_TRUE(observer.IsEmpty()); } @@ -603,7 +603,13 @@ V8_NOINLINE void StackToStackTest(v8::Isolate* v8_isolate, Operation op, FullGC(v8_isolate); EXPECT_FALSE(observer.IsEmpty()); stack_handle2.Reset(); - FullGC(v8_isolate); + { + // Conservative scanning may find stale pointers to on-stack handles. + // Disable scanning, assuming the slots are overwritten. 
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning( + reinterpret_cast(v8_isolate)->heap()); + FullGC(v8_isolate); + } EXPECT_TRUE(observer.IsEmpty()); } From a5acddef26409dfcf95bfe5c8a04065c03784864 Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Mon, 2 Jan 2023 19:08:43 -0800 Subject: [PATCH 116/654] Update V8 DEPS (trusted) Rolling v8/build: https://chromium.googlesource.com/chromium/src/build/+log/7ba88a0..5129278 Rolling v8/third_party/depot_tools: https://chromium.googlesource.com/chromium/tools/depot_tools/+log/03af44a..5b0c934 Rolling v8/third_party/fuchsia-sdk/sdk: version:11.20230101.3.1..version:11.20230102.2.1 Change-Id: I4e5f8192c181d81ab7f241583e87fdfff2ca5d92 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4131422 Bot-Commit: v8-ci-autoroll-builder Commit-Queue: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#85067} --- DEPS | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/DEPS b/DEPS index 2ed57ed318..445ebcac13 100644 --- a/DEPS +++ b/DEPS @@ -65,7 +65,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. 
- 'fuchsia_version': 'version:11.20230101.3.1', + 'fuchsia_version': 'version:11.20230102.2.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -105,7 +105,7 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '7ba88a00bd9bce363068803418a79da44f4fa0db', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '51292789ecae63df94fa2058407cdc7c7b886b6c', 'buildtools': Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'cf8d11e41138ca777f3eaa09df41bf968c8be6ba', 'buildtools/clang_format/script': @@ -217,7 +217,7 @@ deps = { 'condition': 'checkout_android', }, 'third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '03af44a5163e9448e375a6bbe7bef1fc0e2bb205', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '5b0c93402623ee632077ae073555867a0f984fc6', 'third_party/fuchsia-sdk/sdk': { 'packages': [ { From e3828ab08812b5dbc202a95f3601d3a16153825a Mon Sep 17 00:00:00 2001 From: Michael Achenbach Date: Tue, 3 Jan 2023 10:02:50 +0000 Subject: [PATCH 117/654] Revert "[foozzie] Temporarily lower the amount of --future tests" This reverts commit 6b2c271cfb7d96b7db47c2d4ab36751b07332a8e. Reason for revert: All open bugs were fixed. Original change's description: > [foozzie] Temporarily lower the amount of --future tests > > Drop --future from 25% to 5% for a few days until all currently open > correctness cases associated with --future are fixed. 
> > No-Try: true > Bug: v8:7700 > Change-Id: I161a0adbc767c5cec46409443fe58c634531487c > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4114292 > Reviewed-by: Toon Verwaest > Commit-Queue: Michael Achenbach > Cr-Commit-Position: refs/heads/main@{#85010} Change-Id: I804a4c33922595e380bdd11150ff826d6669d846 No-Try: true Bug: v8:7700 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128527 Commit-Queue: Michael Achenbach Bot-Commit: Rubber Stamper Reviewed-by: Jakob Linke Cr-Commit-Position: refs/heads/main@{#85068} --- tools/clusterfuzz/foozzie/v8_fuzz_flags.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/clusterfuzz/foozzie/v8_fuzz_flags.json b/tools/clusterfuzz/foozzie/v8_fuzz_flags.json index 3547de27e7..c7e39668c8 100644 --- a/tools/clusterfuzz/foozzie/v8_fuzz_flags.json +++ b/tools/clusterfuzz/foozzie/v8_fuzz_flags.json @@ -10,7 +10,7 @@ [0.01, "--thread-pool-size=4"], [0.01, "--thread-pool-size=8"], [0.1, "--interrupt-budget=1000"], - [0.05, "--future"], + [0.25, "--future"], [0.2, "--no-regexp-tier-up"], [0.1, "--regexp-interpret-all"], [0.1, "--regexp-tier-up-ticks=10"], From 6f29973f20ff22176a802334ad797ce34b0078bb Mon Sep 17 00:00:00 2001 From: Victor Gomes Date: Tue, 3 Jan 2023 11:00:04 +0100 Subject: [PATCH 118/654] [maglev][arm64] Share [Holey]Float64Box ... and CheckJSArrayBounds. Also remove unused CmpObjectType in macro assembler. 
Bug: v8:7700 Change-Id: I44297fd01146d68643222ad270391c597d0cbe66 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128093 Commit-Queue: Victor Gomes Auto-Submit: Victor Gomes Reviewed-by: Darius Mercadier Cr-Commit-Position: refs/heads/main@{#85069} --- src/codegen/arm64/macro-assembler-arm64.h | 5 --- src/maglev/arm64/maglev-assembler-arm64-inl.h | 14 +++++++ src/maglev/arm64/maglev-ir-arm64.cc | 38 ++++++++++++------- src/maglev/maglev-assembler.h | 2 + src/maglev/maglev-ir.cc | 34 +++++++++++++++++ src/maglev/x64/maglev-assembler-x64-inl.h | 12 ++++++ src/maglev/x64/maglev-ir-x64.cc | 34 ----------------- 7 files changed, 86 insertions(+), 53 deletions(-) diff --git a/src/codegen/arm64/macro-assembler-arm64.h b/src/codegen/arm64/macro-assembler-arm64.h index 52639a0096..89be4d2f49 100644 --- a/src/codegen/arm64/macro-assembler-arm64.h +++ b/src/codegen/arm64/macro-assembler-arm64.h @@ -2011,11 +2011,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // other registers. void CompareObjectType(Register heap_object, Register map, Register type_reg, InstanceType type); - // A version of CompareObjectType which does not set the {type_reg} and has - // the same signatureas the x64 version of CmpObjectType. - void CmpObjectType(Register heap_object, InstanceType type, Register map) { - CompareObjectType(heap_object, map, xzr, type); - } // Compare object type for heap object, and branch if equal (or not.) 
// heap_object contains a non-Smi whose object type should be compared with diff --git a/src/maglev/arm64/maglev-assembler-arm64-inl.h b/src/maglev/arm64/maglev-assembler-arm64-inl.h index 8d91aabca4..95638508d1 100644 --- a/src/maglev/arm64/maglev-assembler-arm64-inl.h +++ b/src/maglev/arm64/maglev-assembler-arm64-inl.h @@ -249,6 +249,20 @@ inline void MaglevAssembler::BindBlock(BasicBlock* block) { } } +inline void MaglevAssembler::DoubleToInt64Repr(Register dst, + DoubleRegister src) { + Mov(dst, src, 0); +} + +inline Condition MaglevAssembler::IsInt64Constant(Register reg, + int64_t constant) { + UseScratchRegisterScope temps(this); + Register scratch = temps.AcquireX(); + Mov(scratch, kHoleNanInt64); + Cmp(reg, scratch); + return eq; +} + inline Condition MaglevAssembler::IsRootConstant(Input input, RootIndex root_index) { if (input.operand().IsRegister()) { diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index 8f0b305c56..d052ff6bb1 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -165,7 +165,6 @@ UNIMPLEMENTED_NODE(LoadDoubleDataViewElement) UNIMPLEMENTED_NODE(LoadSignedIntTypedArrayElement, elements_kind_) UNIMPLEMENTED_NODE(LoadUnsignedIntTypedArrayElement, elements_kind_) UNIMPLEMENTED_NODE(LoadDoubleTypedArrayElement, elements_kind_) -UNIMPLEMENTED_NODE(HoleyFloat64Box) UNIMPLEMENTED_NODE(SetPendingMessage) int ToObject::MaxCallStackArgs() const { @@ -238,7 +237,6 @@ void ToString::GenerateCode(MaglevAssembler* masm, UNIMPLEMENTED_NODE(AssertInt32, condition_, reason_) UNIMPLEMENTED_NODE(CheckUint32IsSmi) -UNIMPLEMENTED_NODE(CheckJSArrayBounds) UNIMPLEMENTED_NODE(CheckJSDataViewBounds, element_type_) UNIMPLEMENTED_NODE(CheckJSObjectElementsBounds) UNIMPLEMENTED_NODE(CheckJSTypedArrayBounds, elements_kind_) @@ -352,6 +350,29 @@ void CheckedUint32ToInt32::GenerateCode(MaglevAssembler* masm, __ Tbnz(input_reg, 31, fail); } +void 
CheckJSArrayBounds::SetValueLocationConstraints() { + UseRegister(receiver_input()); + UseRegister(index_input()); +} +void CheckJSArrayBounds::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register object = ToRegister(receiver_input()); + Register index = ToRegister(index_input()); + __ AssertNotSmi(object); + + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + + if (v8_flags.debug_code) { + __ CompareObjectType(object, scratch, scratch, JS_ARRAY_TYPE); + __ Assert(eq, AbortReason::kUnexpectedValue); + } + + __ SmiUntagField(scratch, FieldMemOperand(object, JSArray::kLengthOffset)); + __ Cmp(index, scratch); + __ EmitEagerDeoptIf(hs, DeoptimizeReason::kOutOfBounds, this); +} + void ChangeInt32ToFloat64::SetValueLocationConstraints() { UseRegister(input()); DefineAsRegister(this); @@ -678,7 +699,7 @@ void CheckSymbol::GenerateCode(MaglevAssembler* masm, } UseScratchRegisterScope temps(masm); Register scratch = temps.AcquireX(); - __ CmpObjectType(object, SYMBOL_TYPE, scratch); + __ CompareObjectType(object, scratch, scratch, SYMBOL_TYPE); __ EmitEagerDeoptIf(ne, DeoptimizeReason::kNotASymbol, this); } @@ -1448,17 +1469,6 @@ void UnsafeSmiTag::GenerateCode(MaglevAssembler* masm, } } -void Float64Box::SetValueLocationConstraints() { - UseRegister(input()); - DefineAsRegister(this); -} -void Float64Box::GenerateCode(MaglevAssembler* masm, - const ProcessingState& state) { - DoubleRegister value = ToDoubleRegister(input()); - Register object = ToRegister(result()); - __ AllocateHeapNumber(register_snapshot(), object, value); -} - void CheckedFloat64Unbox::SetValueLocationConstraints() { UseRegister(input()); DefineAsRegister(this); diff --git a/src/maglev/maglev-assembler.h b/src/maglev/maglev-assembler.h index b21ff1d043..94b838b73f 100644 --- a/src/maglev/maglev-assembler.h +++ b/src/maglev/maglev-assembler.h @@ -80,6 +80,7 @@ class MaglevAssembler : public MacroAssembler { inline void BindBlock(BasicBlock* 
block); + inline Condition IsInt64Constant(Register reg, int64_t constant); inline Condition IsRootConstant(Input input, RootIndex root_index); inline void Branch(Condition condition, BasicBlock* if_true, @@ -111,6 +112,7 @@ class MaglevAssembler : public MacroAssembler { void ToBoolean(Register value, ZoneLabelRef is_true, ZoneLabelRef is_false, bool fallthrough_when_true); + inline void DoubleToInt64Repr(Register dst, DoubleRegister src); void TruncateDoubleToInt32(Register dst, DoubleRegister src); inline void DefineLazyDeoptPoint(LazyDeoptInfo* info); diff --git a/src/maglev/maglev-ir.cc b/src/maglev/maglev-ir.cc index 4032ad8e42..f8099f7a77 100644 --- a/src/maglev/maglev-ir.cc +++ b/src/maglev/maglev-ir.cc @@ -1508,6 +1508,40 @@ void GetKeyedGeneric::GenerateCode(MaglevAssembler* masm, masm->DefineExceptionHandlerAndLazyDeoptPoint(this); } +void Float64Box::SetValueLocationConstraints() { + UseRegister(input()); + DefineAsRegister(this); +} +void Float64Box::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + DoubleRegister value = ToDoubleRegister(input()); + Register object = ToRegister(result()); + __ AllocateHeapNumber(register_snapshot(), object, value); +} + +void HoleyFloat64Box::SetValueLocationConstraints() { + UseRegister(input()); + DefineAsRegister(this); +} +void HoleyFloat64Box::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + ZoneLabelRef done(masm); + DoubleRegister value = ToDoubleRegister(input()); + // Using return as scratch register. 
+ Register repr = ToRegister(result()); + Register object = ToRegister(result()); + __ DoubleToInt64Repr(repr, value); + __ JumpToDeferredIf( + __ IsInt64Constant(repr, kHoleNanInt64), + [](MaglevAssembler* masm, Register object, ZoneLabelRef done) { + __ LoadRoot(object, RootIndex::kUndefinedValue); + __ Jump(*done); + }, + object, done); + __ AllocateHeapNumber(register_snapshot(), object, value); + __ bind(*done); +} + void StoreTaggedFieldNoWriteBarrier::SetValueLocationConstraints() { UseRegister(object_input()); UseRegister(value_input()); diff --git a/src/maglev/x64/maglev-assembler-x64-inl.h b/src/maglev/x64/maglev-assembler-x64-inl.h index 6be20a939e..69bd5d44d6 100644 --- a/src/maglev/x64/maglev-assembler-x64-inl.h +++ b/src/maglev/x64/maglev-assembler-x64-inl.h @@ -133,6 +133,18 @@ inline void MaglevAssembler::BindBlock(BasicBlock* block) { bind(block->label()); } +inline void MaglevAssembler::DoubleToInt64Repr(Register dst, + DoubleRegister src) { + Movq(dst, src); +} + +inline Condition MaglevAssembler::IsInt64Constant(Register reg, + int64_t constant) { + movq(kScratchRegister, kHoleNanInt64); + cmpq(reg, kScratchRegister); + return equal; +} + inline Condition MaglevAssembler::IsRootConstant(Input input, RootIndex root_index) { if (input.operand().IsRegister()) { diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index 8ff4993ac6..0c565fa65f 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -2090,40 +2090,6 @@ void Uint32ToNumber::GenerateCode(MaglevAssembler* masm, __ bind(*done); } -void Float64Box::SetValueLocationConstraints() { - UseRegister(input()); - DefineAsRegister(this); -} -void Float64Box::GenerateCode(MaglevAssembler* masm, - const ProcessingState& state) { - DoubleRegister value = ToDoubleRegister(input()); - Register object = ToRegister(result()); - __ AllocateHeapNumber(register_snapshot(), object, value); -} - -void HoleyFloat64Box::SetValueLocationConstraints() { - 
UseRegister(input()); - DefineAsRegister(this); -} -void HoleyFloat64Box::GenerateCode(MaglevAssembler* masm, - const ProcessingState& state) { - ZoneLabelRef done(masm); - DoubleRegister value = ToDoubleRegister(input()); - Register object = ToRegister(result()); - __ movq(object, value); - __ movq(kScratchRegister, kHoleNanInt64); - __ cmpq(object, kScratchRegister); - __ JumpToDeferredIf( - equal, - [](MaglevAssembler* masm, Register object, ZoneLabelRef done) { - __ LoadRoot(object, RootIndex::kUndefinedValue); - __ jmp(*done); - }, - object, done); - __ AllocateHeapNumber(register_snapshot(), object, value); - __ bind(*done); -} - void CheckedFloat64Unbox::SetValueLocationConstraints() { UseRegister(input()); DefineAsRegister(this); From 45277889e8fcb0b875cdbb70ef5093e0957d6326 Mon Sep 17 00:00:00 2001 From: pthier Date: Tue, 3 Jan 2023 12:06:21 +0100 Subject: [PATCH 119/654] [maglev][arm64] Make CheckUint32IsSmi arch agnostic and port AssertInt32 Bug: v8:7700 Change-Id: I9710adb42b56c7df18bcb0570dc7693558f655dc Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128507 Reviewed-by: Victor Gomes Commit-Queue: Patrick Thier Cr-Commit-Position: refs/heads/main@{#85070} --- src/maglev/arm64/maglev-ir-arm64.cc | 12 ++++++++++-- src/maglev/maglev-ir.cc | 10 ++++++++++ src/maglev/x64/maglev-ir-x64.cc | 11 +---------- 3 files changed, 21 insertions(+), 12 deletions(-) diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index d052ff6bb1..c294efd732 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -235,8 +235,16 @@ void ToString::GenerateCode(MaglevAssembler* masm, __ bind(&done); } -UNIMPLEMENTED_NODE(AssertInt32, condition_, reason_) -UNIMPLEMENTED_NODE(CheckUint32IsSmi) +void AssertInt32::SetValueLocationConstraints() { + UseRegister(left_input()); + UseRegister(right_input()); +} +void AssertInt32::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + __ 
Cmp(ToRegister(left_input()).W(), ToRegister(right_input()).W()); + __ Check(ToCondition(condition_), reason_); +} + UNIMPLEMENTED_NODE(CheckJSDataViewBounds, element_type_) UNIMPLEMENTED_NODE(CheckJSObjectElementsBounds) UNIMPLEMENTED_NODE(CheckJSTypedArrayBounds, elements_kind_) diff --git a/src/maglev/maglev-ir.cc b/src/maglev/maglev-ir.cc index f8099f7a77..f60213452e 100644 --- a/src/maglev/maglev-ir.cc +++ b/src/maglev/maglev-ir.cc @@ -744,6 +744,16 @@ void GapMove::GenerateCode(MaglevAssembler* masm, } } +void CheckUint32IsSmi::SetValueLocationConstraints() { UseRegister(input()); } +void CheckUint32IsSmi::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register reg = ToRegister(input()); + // Perform an unsigned comparison against Smi::kMaxValue. + __ Cmp(reg, Smi::kMaxValue); + __ EmitEagerDeoptIf(ToCondition(AssertCondition::kAbove), + DeoptimizeReason::kNotASmi, this); +} + void CheckedSmiUntag::SetValueLocationConstraints() { UseRegister(input()); DefineSameAsFirst(this); diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index 0c565fa65f..42c2e9868a 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -218,7 +218,7 @@ void AssertInt32::SetValueLocationConstraints() { } void AssertInt32::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { - __ cmpq(ToRegister(left_input()), ToRegister(right_input())); + __ cmpl(ToRegister(left_input()), ToRegister(right_input())); __ Check(ToCondition(condition_), reason_); } @@ -1982,15 +1982,6 @@ void CheckInt32IsSmi::GenerateCode(MaglevAssembler* masm, __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kNotASmi, this); } -void CheckUint32IsSmi::SetValueLocationConstraints() { UseRegister(input()); } -void CheckUint32IsSmi::GenerateCode(MaglevAssembler* masm, - const ProcessingState& state) { - Register reg = ToRegister(input()); - // Perform an unsigned comparison against Smi::kMaxValue. 
- __ cmpl(reg, Immediate(Smi::kMaxValue)); - __ EmitEagerDeoptIf(above, DeoptimizeReason::kNotASmi, this); -} - void CheckedSmiTagInt32::SetValueLocationConstraints() { UseAndClobberRegister(input()); DefineSameAsFirst(this); From 5694514c5c41c6bcd647e90b2a80f4def83cd5e6 Mon Sep 17 00:00:00 2001 From: Darius M Date: Tue, 3 Jan 2023 13:09:47 +0100 Subject: [PATCH 120/654] [maglev][arm64] Implement DataView IRs Bug: v8:7700 Change-Id: I90a837981d56f3bb26814e243b8e8fe2c324a3ca Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128526 Commit-Queue: Darius Mercadier Reviewed-by: Victor Gomes Cr-Commit-Position: refs/heads/main@{#85071} --- src/maglev/arm64/maglev-assembler-arm64-inl.h | 55 ++++ src/maglev/arm64/maglev-ir-arm64.cc | 273 +++++++++++++++++- src/maglev/maglev-assembler.h | 6 +- src/maglev/maglev-ir.cc | 15 + src/maglev/maglev-ir.h | 14 + src/maglev/x64/maglev-ir-x64.cc | 30 -- 6 files changed, 355 insertions(+), 38 deletions(-) diff --git a/src/maglev/arm64/maglev-assembler-arm64-inl.h b/src/maglev/arm64/maglev-assembler-arm64-inl.h index 95638508d1..369f8336ed 100644 --- a/src/maglev/arm64/maglev-assembler-arm64-inl.h +++ b/src/maglev/arm64/maglev-assembler-arm64-inl.h @@ -313,6 +313,61 @@ inline MemOperand MaglevAssembler::ToMemOperand(const ValueLocation& location) { return ToMemOperand(location.operand()); } +inline void MaglevAssembler::LoadBoundedSizeFromObject(Register result, + Register object, + int offset) { + Move(result, FieldMemOperand(object, offset)); +#ifdef V8_ENABLE_SANDBOX + Lsl(result, result, kBoundedSizeShift); +#endif // V8_ENABLE_SANDBOX +} + +inline void MaglevAssembler::LoadExternalPointerField(Register result, + MemOperand operand) { +#ifdef V8_ENABLE_SANDBOX + LoadSandboxedPointerField(result, operand); +#else + Move(result, operand); +#endif +} + +inline void MaglevAssembler::LoadSignedField(Register result, + MemOperand operand, int size) { + if (size == 1) { + ldrsb(result, operand); + } else if (size == 2) { + 
ldrsh(result, operand); + } else { + DCHECK_EQ(size, 4); + DCHECK(result.IsW()); + ldr(result, operand); + } +} + +inline void MaglevAssembler::StoreField(MemOperand operand, Register value, + int size) { + DCHECK(size == 1 || size == 2 || size == 4); + if (size == 1) { + strb(value, operand); + } else if (size == 2) { + strh(value, operand); + } else { + DCHECK_EQ(size, 4); + DCHECK(value.IsW()); + str(value, operand); + } +} + +inline void MaglevAssembler::ReverseByteOrder(Register value, int size) { + if (size == 2) { + Rev16(value, value); + } else if (size == 4) { + Rev32(value, value); + } else { + DCHECK_EQ(size, 1); + } +} + inline void MaglevAssembler::Move(StackSlot dst, Register src) { Str(src, StackSlotOperand(dst)); } diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index c294efd732..e763bdad32 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -160,8 +160,6 @@ void ConvertReceiver::GenerateCode(MaglevAssembler* masm, __ bind(&done); } -UNIMPLEMENTED_NODE(LoadSignedIntDataViewElement, type_) -UNIMPLEMENTED_NODE(LoadDoubleDataViewElement) UNIMPLEMENTED_NODE(LoadSignedIntTypedArrayElement, elements_kind_) UNIMPLEMENTED_NODE(LoadUnsignedIntTypedArrayElement, elements_kind_) UNIMPLEMENTED_NODE(LoadDoubleTypedArrayElement, elements_kind_) @@ -245,12 +243,9 @@ void AssertInt32::GenerateCode(MaglevAssembler* masm, __ Check(ToCondition(condition_), reason_); } -UNIMPLEMENTED_NODE(CheckJSDataViewBounds, element_type_) UNIMPLEMENTED_NODE(CheckJSObjectElementsBounds) UNIMPLEMENTED_NODE(CheckJSTypedArrayBounds, elements_kind_) UNIMPLEMENTED_NODE_WITH_CALL(JumpLoopPrologue, loop_depth_, unit_) -UNIMPLEMENTED_NODE(StoreSignedIntDataViewElement, type_) -UNIMPLEMENTED_NODE(StoreDoubleDataViewElement) int BuiltinStringFromCharCode::MaxCallStackArgs() const { return AllocateDescriptor::GetStackParameterCount(); @@ -1402,6 +1397,33 @@ void CheckedSmiTagUint32::GenerateCode(MaglevAssembler* 
masm, __ Assert(vc, AbortReason::kInputDoesNotFitSmi); } +void CheckJSDataViewBounds::SetValueLocationConstraints() { + UseRegister(receiver_input()); + UseRegister(index_input()); +} +void CheckJSDataViewBounds::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register object = ToRegister(receiver_input()); + Register index = ToRegister(index_input()); + Register byte_length = kScratchRegister; + if (v8_flags.debug_code) { + __ AssertNotSmi(object); + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + __ CompareObjectType(object, scratch, scratch, JS_DATA_VIEW_TYPE); + __ Assert(eq, AbortReason::kUnexpectedValue); + } + __ LoadBoundedSizeFromObject(byte_length, object, + JSDataView::kRawByteLengthOffset); + int element_size = ExternalArrayElementSize(element_type_); + if (element_size > 1) { + __ Cmp(byte_length, Immediate(element_size - 1)); + __ EmitEagerDeoptIf(mi, DeoptimizeReason::kOutOfBounds, this); + } + __ Cmp(index, byte_length); + __ EmitEagerDeoptIf(hs, DeoptimizeReason::kOutOfBounds, this); +} + void CheckedInternalizedString::SetValueLocationConstraints() { UseRegister(object_input()); DefineSameAsFirst(this); @@ -1838,6 +1860,247 @@ void StoreMap::GenerateCode(MaglevAssembler* masm, __ bind(*done); } +void LoadSignedIntDataViewElement::SetValueLocationConstraints() { + UseRegister(object_input()); + UseRegister(index_input()); + if (is_little_endian_constant() || + type_ == ExternalArrayType::kExternalInt8Array) { + UseAny(is_little_endian_input()); + } else { + UseRegister(is_little_endian_input()); + } + DefineAsRegister(this); +} +void LoadSignedIntDataViewElement::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register object = ToRegister(object_input()); + Register index = ToRegister(index_input()); + Register result_reg = ToRegister(result()); + + __ AssertNotSmi(object); + if (v8_flags.debug_code) { + UseScratchRegisterScope temps(masm); + Register scratch = 
temps.AcquireX(); + __ CompareObjectType(object, scratch, scratch, JS_DATA_VIEW_TYPE); + __ Assert(hs, AbortReason::kUnexpectedValue); + } + + int element_size = ExternalArrayElementSize(type_); + + // Load data pointer. + { + UseScratchRegisterScope temps(masm); + Register data_pointer = temps.AcquireX(); + __ LoadExternalPointerField( + data_pointer, FieldMemOperand(object, JSDataView::kDataPointerOffset)); + + __ LoadSignedField(result_reg.W(), MemOperand(data_pointer, index), + element_size); + } + + // We ignore little endian argument if type is a byte size. + if (type_ != ExternalArrayType::kExternalInt8Array) { + if (is_little_endian_constant()) { + if (!FromConstantToBool(masm, is_little_endian_input().node())) { + __ ReverseByteOrder(result_reg, element_size); + } + } else { + ZoneLabelRef is_little_endian(masm), is_big_endian(masm); + __ ToBoolean(ToRegister(is_little_endian_input()), is_little_endian, + is_big_endian, false); + __ bind(*is_big_endian); + __ ReverseByteOrder(result_reg, element_size); + __ bind(*is_little_endian); + // arm64 is little endian. 
+ static_assert(V8_TARGET_LITTLE_ENDIAN == 1); + } + } +} + +void StoreSignedIntDataViewElement::SetValueLocationConstraints() { + UseRegister(object_input()); + UseRegister(index_input()); + if (ExternalArrayElementSize(type_) > 1) { + UseAndClobberRegister(value_input()); + } else { + UseRegister(value_input()); + } + if (is_little_endian_constant() || + type_ == ExternalArrayType::kExternalInt8Array) { + UseAny(is_little_endian_input()); + } else { + UseRegister(is_little_endian_input()); + } +} +void StoreSignedIntDataViewElement::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register object = ToRegister(object_input()); + Register index = ToRegister(index_input()); + Register value = ToRegister(value_input()); + + __ AssertNotSmi(object); + if (v8_flags.debug_code) { + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + __ CompareObjectType(object, scratch, scratch, JS_DATA_VIEW_TYPE); + __ Assert(hs, AbortReason::kUnexpectedValue); + } + + int element_size = ExternalArrayElementSize(type_); + + // We ignore little endian argument if type is a byte size. + if (element_size > 1) { + if (is_little_endian_constant()) { + if (!FromConstantToBool(masm, is_little_endian_input().node())) { + __ ReverseByteOrder(value, element_size); + } + } else { + ZoneLabelRef is_little_endian(masm), is_big_endian(masm); + __ ToBoolean(ToRegister(is_little_endian_input()), is_little_endian, + is_big_endian, false); + __ bind(*is_big_endian); + __ ReverseByteOrder(value, element_size); + __ bind(*is_little_endian); + // arm64 is little endian. 
+ static_assert(V8_TARGET_LITTLE_ENDIAN == 1); + } + } + + UseScratchRegisterScope temps(masm); + Register data_pointer = temps.AcquireX(); + __ LoadExternalPointerField( + data_pointer, FieldMemOperand(object, JSDataView::kDataPointerOffset)); + __ StoreField(MemOperand(data_pointer, index), value.W(), element_size); +} + +void LoadDoubleDataViewElement::SetValueLocationConstraints() { + UseRegister(object_input()); + UseRegister(index_input()); + if (is_little_endian_constant()) { + UseAny(is_little_endian_input()); + } else { + UseRegister(is_little_endian_input()); + } + set_temporaries_needed(1); + DefineAsRegister(this); +} +void LoadDoubleDataViewElement::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register object = ToRegister(object_input()); + Register index = ToRegister(index_input()); + DoubleRegister result_reg = ToDoubleRegister(result()); + Register data_pointer = general_temporaries().PopFirst(); + + __ AssertNotSmi(object); + if (v8_flags.debug_code) { + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + __ CompareObjectType(object, scratch, scratch, JS_DATA_VIEW_TYPE); + __ Assert(hs, AbortReason::kUnexpectedValue); + } + + // Load data pointer. + __ LoadExternalPointerField( + data_pointer, FieldMemOperand(object, JSDataView::kDataPointerOffset)); + + if (is_little_endian_constant()) { + if (FromConstantToBool(masm, is_little_endian_input().node())) { + __ Move(result_reg, MemOperand(data_pointer, index)); + } else { + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + __ Move(scratch, MemOperand(data_pointer, index)); + __ Rev(scratch, scratch); + __ Fmov(result_reg, scratch); + } + } else { + Label done; + ZoneLabelRef is_little_endian(masm), is_big_endian(masm); + // TODO(leszeks): We're likely to be calling this on an existing boolean -- + // maybe that's a case we should fast-path here and re-use that boolean + // value? 
+ __ ToBoolean(ToRegister(is_little_endian_input()), is_little_endian, + is_big_endian, true); + // arm64 is little endian. + static_assert(V8_TARGET_LITTLE_ENDIAN == 1); + __ bind(*is_little_endian); + __ Move(result_reg, MemOperand(data_pointer, index)); + __ jmp(&done); + // We should swap the bytes if big endian. + __ bind(*is_big_endian); + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + __ Move(scratch, MemOperand(data_pointer, index)); + __ Rev(scratch, scratch); + __ Fmov(result_reg, scratch); + __ bind(&done); + } +} + +void StoreDoubleDataViewElement::SetValueLocationConstraints() { + UseRegister(object_input()); + UseRegister(index_input()); + UseRegister(value_input()); + if (is_little_endian_constant()) { + UseAny(is_little_endian_input()); + } else { + UseRegister(is_little_endian_input()); + } + set_temporaries_needed(1); +} +void StoreDoubleDataViewElement::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register object = ToRegister(object_input()); + Register index = ToRegister(index_input()); + DoubleRegister value = ToDoubleRegister(value_input()); + Register data_pointer = general_temporaries().PopFirst(); + + __ AssertNotSmi(object); + if (v8_flags.debug_code) { + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + __ CompareObjectType(object, scratch, scratch, JS_DATA_VIEW_TYPE); + __ Assert(hs, AbortReason::kUnexpectedValue); + } + + // Load data pointer. 
+ __ LoadExternalPointerField( + data_pointer, FieldMemOperand(object, JSDataView::kDataPointerOffset)); + + if (is_little_endian_constant()) { + if (FromConstantToBool(masm, is_little_endian_input().node())) { + __ Str(value, MemOperand(data_pointer, index)); + } else { + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + __ Fmov(scratch, value); + __ Rev(scratch, scratch); + __ Str(scratch, MemOperand(data_pointer, index)); + } + } else { + Label done; + ZoneLabelRef is_little_endian(masm), is_big_endian(masm); + // TODO(leszeks): We're likely to be calling this on an existing boolean -- + // maybe that's a case we should fast-path here and re-use that boolean + // value? + __ ToBoolean(ToRegister(is_little_endian_input()), is_little_endian, + is_big_endian, true); + // arm64 is little endian. + static_assert(V8_TARGET_LITTLE_ENDIAN == 1); + __ bind(*is_little_endian); + __ Str(value, MemOperand(data_pointer, index)); + __ jmp(&done); + // We should swap the bytes if big endian. 
+ __ bind(*is_big_endian); + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + __ Fmov(scratch, value); + __ Rev(scratch, scratch); + __ Str(scratch, MemOperand(data_pointer, index)); + __ bind(&done); + } +} + int StoreTaggedFieldWithWriteBarrier::MaxCallStackArgs() const { return WriteBarrierDescriptor::GetStackParameterCount(); } diff --git a/src/maglev/maglev-assembler.h b/src/maglev/maglev-assembler.h index 94b838b73f..28f95585a3 100644 --- a/src/maglev/maglev-assembler.h +++ b/src/maglev/maglev-assembler.h @@ -89,13 +89,13 @@ class MaglevAssembler : public MacroAssembler { inline void LoadBoundedSizeFromObject(Register result, Register object, int offset); - inline void LoadExternalPointerField(Register result, Operand operand); + inline void LoadExternalPointerField(Register result, MemOperand operand); - inline void LoadSignedField(Register result, Operand operand, + inline void LoadSignedField(Register result, MemOperand operand, int element_size); inline void LoadUnsignedField(Register result, Operand operand, int element_size); - inline void StoreField(Operand operand, Register value, int element_size); + inline void StoreField(MemOperand operand, Register value, int element_size); inline void ReverseByteOrder(Register value, int element_size); // Warning: Input registers {string} and {index} will be scratched. 
diff --git a/src/maglev/maglev-ir.cc b/src/maglev/maglev-ir.cc index f60213452e..7220493b8d 100644 --- a/src/maglev/maglev-ir.cc +++ b/src/maglev/maglev-ir.cc @@ -187,6 +187,21 @@ bool RootConstant::ToBoolean(LocalIsolate* local_isolate) const { return RootToBoolean(index_); } +bool FromConstantToBool(MaglevAssembler* masm, ValueNode* node) { + DCHECK(IsConstantNode(node->opcode())); + LocalIsolate* local_isolate = masm->isolate()->AsLocalIsolate(); + switch (node->opcode()) { +#define CASE(Name) \ + case Opcode::k##Name: { \ + return node->Cast()->ToBoolean(local_isolate); \ + } + CONSTANT_VALUE_NODE_LIST(CASE) +#undef CASE + default: + UNREACHABLE(); + } +} + DeoptInfo::DeoptInfo(Zone* zone, DeoptFrame top_frame, compiler::FeedbackSource feedback_to_update) : top_frame_(top_frame), diff --git a/src/maglev/maglev-ir.h b/src/maglev/maglev-ir.h index 3a9ab04289..04c67d12db 100644 --- a/src/maglev/maglev-ir.h +++ b/src/maglev/maglev-ir.h @@ -399,6 +399,20 @@ enum class ValueRepresentation : uint8_t { }; constexpr Condition ConditionFor(Operation cond); +bool FromConstantToBool(MaglevAssembler* masm, ValueNode* node); + +inline int ExternalArrayElementSize(const ExternalArrayType element_type) { + switch (element_type) { +#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \ + case kExternal##Type##Array: \ + DCHECK_LE(sizeof(ctype), 8); \ + return sizeof(ctype); + TYPED_ARRAYS(TYPED_ARRAY_CASE) + default: + UNREACHABLE(); +#undef TYPED_ARRAY_CASE + } +} inline std::ostream& operator<<(std::ostream& os, const ValueRepresentation& repr) { diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index 42c2e9868a..96375c6037 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -489,19 +489,6 @@ int ElementsKindSize(ElementsKind element_kind) { #undef TYPED_ARRAY_CASE } } - -int ExternalArrayElementSize(const ExternalArrayType element_type) { - switch (element_type) { -#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \ 
- case kExternal##Type##Array: \ - DCHECK_LE(sizeof(ctype), 8); \ - return sizeof(ctype); - TYPED_ARRAYS(TYPED_ARRAY_CASE) - default: - UNREACHABLE(); -#undef TYPED_ARRAY_CASE - } -} } // namespace void CheckJSTypedArrayBounds::SetValueLocationConstraints() { @@ -854,23 +841,6 @@ void LoadDoubleElement::GenerateCode(MaglevAssembler* masm, FixedDoubleArray::kHeaderSize)); } -namespace { -bool FromConstantToBool(MaglevAssembler* masm, ValueNode* node) { - DCHECK(IsConstantNode(node->opcode())); - LocalIsolate* local_isolate = masm->isolate()->AsLocalIsolate(); - switch (node->opcode()) { -#define CASE(Name) \ - case Opcode::k##Name: { \ - return node->Cast()->ToBoolean(local_isolate); \ - } - CONSTANT_VALUE_NODE_LIST(CASE) -#undef CASE - default: - UNREACHABLE(); - } -} -} // namespace - void LoadSignedIntDataViewElement::SetValueLocationConstraints() { UseRegister(object_input()); UseRegister(index_input()); From 1fe5f0f8e1302ebcead13065d13b4aa9f8380d2b Mon Sep 17 00:00:00 2001 From: Victor Gomes Date: Tue, 3 Jan 2023 13:18:17 +0100 Subject: [PATCH 121/654] [maglev][arm64] Add SetPending IR ... and add exception handlers trampolines as a jump target for CFI. 
Bug: v8:7700 Change-Id: Ie0ef6617ae5a42965862e5f3cf0d7a50158267bb Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128528 Commit-Queue: Victor Gomes Reviewed-by: Darius Mercadier Auto-Submit: Victor Gomes Cr-Commit-Position: refs/heads/main@{#85072} --- src/maglev/arm64/maglev-assembler-arm64-inl.h | 4 +++ src/maglev/arm64/maglev-ir-arm64.cc | 27 ++++++++++++++++++- src/maglev/maglev-assembler.h | 1 + src/maglev/maglev-code-generator.cc | 4 +-- src/maglev/x64/maglev-assembler-x64-inl.h | 2 ++ 5 files changed, 35 insertions(+), 3 deletions(-) diff --git a/src/maglev/arm64/maglev-assembler-arm64-inl.h b/src/maglev/arm64/maglev-assembler-arm64-inl.h index 369f8336ed..1a69e352b7 100644 --- a/src/maglev/arm64/maglev-assembler-arm64-inl.h +++ b/src/maglev/arm64/maglev-assembler-arm64-inl.h @@ -241,6 +241,10 @@ void MaglevAssembler::PushReverse(T... vals) { detail::PushAllReverse(this, vals...); } +inline void MaglevAssembler::BindJumpTarget(Label* label) { + MacroAssembler::BindJumpTarget(label); +} + inline void MaglevAssembler::BindBlock(BasicBlock* block) { if (block->is_start_block_of_switch_case()) { BindJumpTarget(block->label()); diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index e763bdad32..a2a1474869 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -163,7 +163,6 @@ void ConvertReceiver::GenerateCode(MaglevAssembler* masm, UNIMPLEMENTED_NODE(LoadSignedIntTypedArrayElement, elements_kind_) UNIMPLEMENTED_NODE(LoadUnsignedIntTypedArrayElement, elements_kind_) UNIMPLEMENTED_NODE(LoadDoubleTypedArrayElement, elements_kind_) -UNIMPLEMENTED_NODE(SetPendingMessage) int ToObject::MaxCallStackArgs() const { using D = CallInterfaceDescriptorFor::type; @@ -2157,6 +2156,32 @@ void StoreTaggedFieldWithWriteBarrier::GenerateCode( __ bind(*done); } +void SetPendingMessage::SetValueLocationConstraints() { + UseRegister(value()); + DefineAsRegister(this); +} + +void 
SetPendingMessage::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register new_message = ToRegister(value()); + Register return_value = ToRegister(result()); + + UseScratchRegisterScope temps(masm); + Register scratch0 = temps.AcquireX(); + MemOperand pending_message_operand = __ ExternalReferenceAsOperand( + ExternalReference::address_of_pending_message(masm->isolate()), scratch0); + + if (new_message != return_value) { + __ Ldr(return_value, pending_message_operand); + __ Str(new_message, pending_message_operand); + } else { + Register scratch1 = temps.AcquireX(); + __ Ldr(scratch1, pending_message_operand); + __ Str(new_message, pending_message_operand); + __ Move(return_value, scratch1); + } +} + void StringLength::SetValueLocationConstraints() { UseRegister(object_input()); DefineAsRegister(this); diff --git a/src/maglev/maglev-assembler.h b/src/maglev/maglev-assembler.h index 28f95585a3..688750056e 100644 --- a/src/maglev/maglev-assembler.h +++ b/src/maglev/maglev-assembler.h @@ -78,6 +78,7 @@ class MaglevAssembler : public MacroAssembler { void LoadSingleCharacterString(Register result, Register char_code, Register scratch); + inline void BindJumpTarget(Label* label); inline void BindBlock(BasicBlock* block); inline Condition IsInt64Constant(Register reg, int64_t constant); diff --git a/src/maglev/maglev-code-generator.cc b/src/maglev/maglev-code-generator.cc index 6479c2d5a0..24d2465cb6 100644 --- a/src/maglev/maglev-code-generator.cc +++ b/src/maglev/maglev-code-generator.cc @@ -483,13 +483,13 @@ class ExceptionHandlerTrampolineBuilder { RecordMoves(lazy_frame.unit(), catch_block, lazy_frame.frame_state(), &direct_moves, &materialising_moves, &save_accumulator); - __ bind(&handler_info->trampoline_entry); + __ BindJumpTarget(&handler_info->trampoline_entry); __ RecordComment("-- Exception handler trampoline START"); EmitMaterialisationsAndPushResults(materialising_moves, save_accumulator); __ RecordComment("EmitMoves"); 
direct_moves.EmitMoves(); EmitPopMaterialisedResults(materialising_moves, save_accumulator); - __ jmp(catch_block->label()); + __ Jump(catch_block->label()); __ RecordComment("-- Exception handler trampoline END"); } diff --git a/src/maglev/x64/maglev-assembler-x64-inl.h b/src/maglev/x64/maglev-assembler-x64-inl.h index 69bd5d44d6..ee0bf03069 100644 --- a/src/maglev/x64/maglev-assembler-x64-inl.h +++ b/src/maglev/x64/maglev-assembler-x64-inl.h @@ -129,6 +129,8 @@ void MaglevAssembler::PushReverse(T... vals) { detail::PushAllHelper::PushReverse(this, vals...); } +inline void MaglevAssembler::BindJumpTarget(Label* label) { bind(label); } + inline void MaglevAssembler::BindBlock(BasicBlock* block) { bind(block->label()); } From dd6853142055e0dc1563d778777da8882102b5df Mon Sep 17 00:00:00 2001 From: Michael Lippautz Date: Tue, 3 Jan 2023 13:25:31 +0100 Subject: [PATCH 122/654] [heap] Moving scheduling GCs from LocalEmbedderHeapTracer to CppHeap Bug: v8:13207 Change-Id: Id595a34677cc58319043c0e784beb5eed9be7411 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128506 Commit-Queue: Michael Lippautz Reviewed-by: Anton Bikineev Cr-Commit-Position: refs/heads/main@{#85073} --- src/base/platform/time.h | 9 +++--- src/heap/cppgc-js/cpp-heap.cc | 53 +++++++++++++++++++++++++++++------ src/heap/cppgc-js/cpp-heap.h | 14 +++++++++ src/heap/embedder-tracing.cc | 25 ----------------- src/heap/embedder-tracing.h | 40 -------------------------- src/heap/heap.cc | 8 ++---- 6 files changed, 64 insertions(+), 85 deletions(-) diff --git a/src/base/platform/time.h b/src/base/platform/time.h index 0820c6f108..1ab8c94732 100644 --- a/src/base/platform/time.h +++ b/src/base/platform/time.h @@ -91,10 +91,10 @@ class V8_BASE_EXPORT TimeDelta final { return TimeDelta(nanoseconds / TimeConstants::kNanosecondsPerMicrosecond); } - static TimeDelta FromSecondsD(double seconds) { + static constexpr TimeDelta FromSecondsD(double seconds) { return FromDouble(seconds * 
TimeConstants::kMicrosecondsPerSecond); } - static TimeDelta FromMillisecondsD(double milliseconds) { + static constexpr TimeDelta FromMillisecondsD(double milliseconds) { return FromDouble(milliseconds * TimeConstants::kMicrosecondsPerMillisecond); } @@ -210,8 +210,7 @@ class V8_BASE_EXPORT TimeDelta final { } private: - // TODO(v8:10620): constexpr requires constexpr saturated_cast. - static inline TimeDelta FromDouble(double value); + static constexpr inline TimeDelta FromDouble(double value); template friend class time_internal::TimeBase; // Constructs a delta given the duration in microseconds. This is private @@ -224,7 +223,7 @@ class V8_BASE_EXPORT TimeDelta final { }; // static -TimeDelta TimeDelta::FromDouble(double value) { +constexpr TimeDelta TimeDelta::FromDouble(double value) { return TimeDelta(saturated_cast(value)); } diff --git a/src/heap/cppgc-js/cpp-heap.cc b/src/heap/cppgc-js/cpp-heap.cc index 67a33bd83c..f30734b198 100644 --- a/src/heap/cppgc-js/cpp-heap.cc +++ b/src/heap/cppgc-js/cpp-heap.cc @@ -588,6 +588,8 @@ bool ShouldReduceMemory(CppHeap::GarbageCollectionFlags flags) { return IsMemoryReducingGC(flags) || IsForceGC(flags); } +constexpr size_t kIncrementalMarkingCheckInterval = 128 * KB; + } // namespace CppHeap::MarkingType CppHeap::SelectMarkingType() const { @@ -761,6 +763,18 @@ bool CppHeap::FinishConcurrentMarkingIfNeeded() { return marker_->JoinConcurrentMarkingIfNeeded(); } +namespace { + +void RecordEmbedderSpeed(GCTracer* tracer, base::TimeDelta marking_time, + size_t marked_bytes) { + constexpr auto kMinReportingTime = base::TimeDelta::FromMillisecondsD(0.5); + if (marking_time > kMinReportingTime) { + tracer->RecordEmbedderSpeed(marked_bytes, marking_time.InMillisecondsF()); + } +} + +} // namespace + void CppHeap::TraceEpilogue() { CHECK(in_atomic_pause_); CHECK(marking_done_); @@ -785,11 +799,13 @@ void CppHeap::TraceEpilogue() { } marker_.reset(); if (isolate_) { - auto* tracer = 
isolate_->heap()->local_embedder_heap_tracer(); - DCHECK_NOT_NULL(tracer); - tracer->UpdateRemoteStats( - stats_collector_->marked_bytes(), - stats_collector_->marking_time().InMillisecondsF()); + used_size_ = stats_collector_->marked_bytes(); + // Force a check next time increased memory is reported. This allows for + // setting limits close to actual heap sizes. + allocated_size_limit_for_check_ = 0; + + RecordEmbedderSpeed(isolate_->heap()->tracer(), + stats_collector_->marking_time(), used_size_); } // The allocated bytes counter in v8 was reset to the current marked bytes, so // any pending allocated bytes updates should be discarded. @@ -858,18 +874,36 @@ void CppHeap::ReportBufferedAllocationSizeIfPossible() { return; } + // We are in attached state. + DCHECK_NOT_NULL(isolate_); + // The calls below may trigger full GCs that are synchronous and also execute // epilogue callbacks. Since such callbacks may allocate, the counter must // already be zeroed by that time. const int64_t bytes_to_report = buffered_allocated_bytes_; buffered_allocated_bytes_ = 0; - auto* const tracer = isolate_->heap()->local_embedder_heap_tracer(); - DCHECK_NOT_NULL(tracer); if (bytes_to_report < 0) { - tracer->DecreaseAllocatedSize(static_cast(-bytes_to_report)); + DCHECK_GE(used_size_.load(std::memory_order_relaxed), static_cast<size_t>(-bytes_to_report)); + used_size_.fetch_sub(static_cast<size_t>(-bytes_to_report), std::memory_order_relaxed); } else { + used_size_.fetch_add(bytes_to_report, std::memory_order_relaxed); + allocated_size_ += bytes_to_report; + + if (v8_flags.global_gc_scheduling && v8_flags.incremental_marking) { + if (allocated_size_ > allocated_size_limit_for_check_) { + Heap* heap = isolate_->heap(); + heap->StartIncrementalMarkingIfAllocationLimitIsReached( + heap->GCFlagsForIncrementalMarking(), + kGCCallbackScheduleIdleGarbageCollection); + if (heap->AllocationLimitOvershotByLargeMargin()) { + heap->FinalizeIncrementalMarkingAtomically( + 
i::GarbageCollectionReason::kExternalFinalize); + } + allocated_size_limit_for_check_ = + allocated_size_ + kIncrementalMarkingCheckInterval; + } + } } } @@ -1069,6 +1103,7 @@ const cppgc::EmbedderStackState* CppHeap::override_stack_state() const { void CppHeap::StartIncrementalGarbageCollection(cppgc::internal::GCConfig) { UNIMPLEMENTED(); } + size_t CppHeap::epoch() const { UNIMPLEMENTED(); } void CppHeap::ResetCrossHeapRememberedSet() { diff --git a/src/heap/cppgc-js/cpp-heap.h b/src/heap/cppgc-js/cpp-heap.h index adc611b84c..66298f38cb 100644 --- a/src/heap/cppgc-js/cpp-heap.h +++ b/src/heap/cppgc-js/cpp-heap.h @@ -158,6 +158,11 @@ class V8_EXPORT_PRIVATE CppHeap final Isolate* isolate() const { return isolate_; } + size_t used_size() const { + return used_size_.load(std::memory_order_relaxed); + } + size_t allocated_size() const { return allocated_size_; } + ::heap::base::Stack* stack() final; std::unique_ptr CreateCppMarkingState(); @@ -223,6 +228,15 @@ class V8_EXPORT_PRIVATE CppHeap final bool force_incremental_marking_for_testing_ = false; bool is_in_v8_marking_step_ = false; + // Used size of objects. Reported to V8's regular heap growing strategy. + std::atomic used_size_{0}; + // Total bytes allocated since the last GC. Monotonically increasing value. + // Used to approximate allocation rate. + size_t allocated_size_ = 0; + // Limit for |allocated_size| in bytes to avoid checking for starting a GC + // on each increment. 
+ size_t allocated_size_limit_for_check_ = 0; + friend class MetricRecorderAdapter; }; diff --git a/src/heap/embedder-tracing.cc b/src/heap/embedder-tracing.cc index 9f4ddf5ec2..349b94824d 100644 --- a/src/heap/embedder-tracing.cc +++ b/src/heap/embedder-tracing.cc @@ -56,18 +56,6 @@ void LocalEmbedderHeapTracer::TraceEpilogue() { cpp_heap()->TraceEpilogue(); } -void LocalEmbedderHeapTracer::UpdateRemoteStats(size_t allocated_size, - double time) { - remote_stats_.used_size = allocated_size; - // Force a check next time increased memory is reported. This allows for - // setting limits close to actual heap sizes. - remote_stats_.allocated_size_limit_for_check = 0; - constexpr double kMinReportingTimeMs = 0.5; - if (time > kMinReportingTimeMs) { - isolate_->heap()->tracer()->RecordEmbedderSpeed(allocated_size, time); - } -} - void LocalEmbedderHeapTracer::EnterFinalPause() { if (!InUse()) return; @@ -93,19 +81,6 @@ LocalEmbedderHeapTracer::ExtractWrapperInfo(Isolate* isolate, return {nullptr, nullptr}; } -void LocalEmbedderHeapTracer::StartIncrementalMarkingIfNeeded() { - if (!v8_flags.global_gc_scheduling || !v8_flags.incremental_marking) return; - - Heap* heap = isolate_->heap(); - heap->StartIncrementalMarkingIfAllocationLimitIsReached( - heap->GCFlagsForIncrementalMarking(), - kGCCallbackScheduleIdleGarbageCollection); - if (heap->AllocationLimitOvershotByLargeMargin()) { - heap->FinalizeIncrementalMarkingAtomically( - i::GarbageCollectionReason::kExternalFinalize); - } -} - void LocalEmbedderHeapTracer::EmbedderWriteBarrier(Heap* heap, JSObject js_object) { DCHECK(InUse()); diff --git a/src/heap/embedder-tracing.h b/src/heap/embedder-tracing.h index 6166eb0cee..fac8988c7b 100644 --- a/src/heap/embedder-tracing.h +++ b/src/heap/embedder-tracing.h @@ -87,33 +87,8 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final { embedder_worklist_empty_ = is_empty; } - void IncreaseAllocatedSize(size_t bytes) { - remote_stats_.used_size.fetch_add(bytes, 
std::memory_order_relaxed); - remote_stats_.allocated_size += bytes; - if (remote_stats_.allocated_size > - remote_stats_.allocated_size_limit_for_check) { - StartIncrementalMarkingIfNeeded(); - remote_stats_.allocated_size_limit_for_check = - remote_stats_.allocated_size + kEmbedderAllocatedThreshold; - } - } - - void DecreaseAllocatedSize(size_t bytes) { - DCHECK_GE(remote_stats_.used_size.load(std::memory_order_relaxed), bytes); - remote_stats_.used_size.fetch_sub(bytes, std::memory_order_relaxed); - } - - void StartIncrementalMarkingIfNeeded(); - - size_t used_size() const { - return remote_stats_.used_size.load(std::memory_order_relaxed); - } - size_t allocated_size() const { return remote_stats_.allocated_size; } - WrapperInfo ExtractWrapperInfo(Isolate* isolate, JSObject js_object); - void UpdateRemoteStats(size_t, double); - cppgc::EmbedderStackState embedder_stack_state() const { return embedder_stack_state_; } @@ -121,8 +96,6 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final { void EmbedderWriteBarrier(Heap*, JSObject); private: - static constexpr size_t kEmbedderAllocatedThreshold = 128 * KB; - CppHeap* cpp_heap() { DCHECK_NOT_NULL(cpp_heap_); DCHECK_IMPLIES(isolate_, cpp_heap_ == isolate_->heap()->cpp_heap()); @@ -143,19 +116,6 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final { // segments of potential embedder fields to move to the main thread. bool embedder_worklist_empty_ = false; - struct RemoteStatistics { - // Used size of objects in bytes reported by the embedder. Updated via - // TraceSummary at the end of tracing and incrementally when the GC is not - // in progress. - std::atomic used_size{0}; - // Totally bytes allocated by the embedder. Monotonically - // increasing value. Used to approximate allocation rate. - size_t allocated_size = 0; - // Limit for |allocated_size| in bytes to avoid checking for starting a GC - // on each increment. 
- size_t allocated_size_limit_for_check = 0; - } remote_stats_; - friend class EmbedderStackStateScope; }; diff --git a/src/heap/heap.cc b/src/heap/heap.cc index 393aa02165..38f753e207 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -5123,9 +5123,7 @@ size_t Heap::OldGenerationSizeOfObjects() { } size_t Heap::EmbedderSizeOfObjects() const { - return local_embedder_heap_tracer() - ? local_embedder_heap_tracer()->used_size() - : 0; + return cpp_heap_ ? CppHeap::From(cpp_heap_)->used_size() : 0; } size_t Heap::GlobalSizeOfObjects() { @@ -6837,9 +6835,7 @@ bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) { } size_t Heap::EmbedderAllocationCounter() const { - return local_embedder_heap_tracer() - ? local_embedder_heap_tracer()->allocated_size() - : 0; + return cpp_heap_ ? CppHeap::From(cpp_heap_)->allocated_size() : 0; } void Heap::CreateObjectStats() { From d7fa9b66dccb4614fba28b39040915ef70747e1d Mon Sep 17 00:00:00 2001 From: Al Muthanna Athamina Date: Wed, 21 Dec 2022 15:25:11 +0100 Subject: [PATCH 123/654] [infra] Add TSAN CQ and CI builders with debug/dchecks Bug: v8:13548 Change-Id: I4d0acf20ec27870540782fc7c2555286b8d7a4c5 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4066480 Commit-Queue: Almothana Athamneh Reviewed-by: Michael Achenbach Cr-Commit-Position: refs/heads/main@{#85074} --- BUILD.gn | 2 ++ infra/mb/mb_config.pyl | 23 ++++++++++++++++++ infra/testing/builders.pyl | 24 +++++++++++++++++++ tools/testrunner/base_runner.py | 23 +++++++++++------- tools/testrunner/build_config.py | 4 ++++ .../testroot1/out/build/v8_build_config.json | 2 ++ .../testroot2/out/build/v8_build_config.json | 2 ++ .../testroot3/out/build/v8_build_config.json | 2 ++ .../out.gn/build/v8_build_config.json | 2 ++ .../testroot6/out/build/v8_build_config.json | 2 ++ 10 files changed, 77 insertions(+), 9 deletions(-) diff --git a/BUILD.gn b/BUILD.gn index 26adeb7be9..9beefd6966 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -2350,6 +2350,8 
@@ action("v8_dump_build_config") { "v8_control_flow_integrity=$v8_control_flow_integrity", "v8_target_cpu=\"$v8_target_cpu\"", "v8_enable_cet_shadow_stack=$v8_enable_cet_shadow_stack", + "v8_enable_verify_heap=$v8_enable_verify_heap", + "v8_enable_slow_dchecks=$v8_enable_slow_dchecks", ] if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { diff --git a/infra/mb/mb_config.pyl b/infra/mb/mb_config.pyl index c494280b72..60d1acd8e4 100644 --- a/infra/mb/mb_config.pyl +++ b/infra/mb/mb_config.pyl @@ -102,6 +102,7 @@ # Sanitizers. 'V8 Linux64 ASAN - builder': 'release_x64_asan', 'V8 Linux64 TSAN - builder': 'release_x64_tsan', + 'V8 Linux64 TSAN - debug builder': 'debug_x64_tsan_minimal_symbols', 'V8 Linux64 TSAN - no-concurrent-marking - builder': 'release_x64_tsan_no_cm', 'V8 Linux - arm64 - sim - CFI - builder': 'release_simulate_arm64_cfi', 'V8 Linux - arm64 - sim - MSAN - builder': 'release_simulate_arm64_msan', @@ -257,6 +258,7 @@ 'v8_linux_riscv32_compile_rel': 'release_simulate_riscv32', 'v8_linux64_riscv64_compile_rel': 'release_simulate_riscv64', 'v8_linux64_tsan_compile_rel': 'release_x64_tsan_minimal_symbols', + 'v8_linux64_tsan_compile_dbg': 'debug_x64_tsan_minimal_symbols', 'v8_linux64_tsan_no_cm_compile_rel': 'release_x64_tsan_no_cm', 'v8_linux64_tsan_isolates_compile_rel': 'release_x64_tsan_minimal_symbols', @@ -599,6 +601,9 @@ 'debug_trybot', 'x64', 'v8_enable_dict_property_const_tracking'], 'debug_x64_trybot_custom': [ 'debug_trybot', 'x64', 'v8_snapshot_custom'], + 'debug_x64_tsan_minimal_symbols': [ + 'debug_bot_no_slow_dchecks', 'minimal_symbols', 'x64', 'dcheck_always_on', + 'tsan', 'v8_disable_verify_heap', 'v8_fast_mksnapshot'], 'full_debug_x64': [ 'debug_bot', 'x64', 'v8_full_debug'], @@ -712,6 +717,12 @@ 'gn_args': 'is_debug=true v8_enable_backtrace=true', }, + 'debug_bot_no_slow_dchecks': { + 'mixins': [ + 'debug', 'shared', 'goma', 'v8_disable_slow_dchecks', + 'v8_optimized_debug', 'v8_enable_google_benchmark'], + }, + 
'debug_bot': { 'mixins': [ 'debug', 'shared', 'goma', 'v8_enable_slow_dchecks', @@ -934,6 +945,10 @@ 'gn_args': 'v8_enable_runtime_call_stats=false', }, + 'v8_disable_verify_heap': { + 'gn_args': 'v8_enable_verify_heap=false', + }, + 'v8_expose_memory_corruption_api': { 'gn_args': 'v8_expose_memory_corruption_api=true', }, @@ -946,6 +961,10 @@ 'gn_args': 'v8_enable_slow_dchecks=true', }, + 'v8_disable_slow_dchecks': { + 'gn_args': 'v8_enable_slow_dchecks=false', + }, + 'v8_enable_javascript_promise_hooks': { 'gn_args': 'v8_enable_javascript_promise_hooks=true', }, @@ -981,6 +1000,10 @@ 'gn_args': 'v8_enable_vtunejit=true v8_enable_vtunetracemark=true', }, + 'v8_fast_mksnapshot': { + 'gn_args': 'v8_enable_fast_mksnapshot=true', + }, + 'v8_full_debug': { 'gn_args': 'v8_optimized_debug=false', }, diff --git a/infra/testing/builders.pyl b/infra/testing/builders.pyl index d6fcb8db83..2a330f6925 100644 --- a/infra/testing/builders.pyl +++ b/infra/testing/builders.pyl @@ -672,6 +672,18 @@ {'name': 'v8testing', 'variant': 'stress_concurrent_allocation', 'shards': 2}, ], }, + 'v8_linux64_tsan_dbg': { + 'swarming_dimensions' : { + 'os': 'Ubuntu-18.04', + }, + 'tests': [ + {'name': 'benchmarks', 'shards': 2}, + {'name': 'mozilla', 'shards': 4}, + {'name': 'test262', 'variant': 'default', 'shards': 5}, + {'name': 'v8testing', 'shards': 12}, + {'name': 'v8testing', 'variant': 'extra', 'shards': 10}, + ], + }, 'v8_linux64_tsan_no_cm_rel': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', @@ -1671,6 +1683,18 @@ {'name': 'v8testing', 'variant': 'stress_concurrent_allocation', 'shards': 2}, ], }, + 'V8 Linux64 TSAN - debug': { + 'swarming_dimensions' : { + 'os': 'Ubuntu-18.04', + }, + 'tests': [ + {'name': 'benchmarks', 'shards': 2}, + {'name': 'mozilla', 'shards': 4}, + {'name': 'test262', 'variant': 'default', 'shards': 5}, + {'name': 'v8testing', 'shards': 12}, + {'name': 'v8testing', 'variant': 'extra', 'shards': 10}, + ], + }, 'V8 Linux64 TSAN - stress-incremental-marking': { 
'swarming_dimensions' : { 'os': 'Ubuntu-18.04', diff --git a/tools/testrunner/base_runner.py b/tools/testrunner/base_runner.py index 826ee321fc..a88d6fec05 100644 --- a/tools/testrunner/base_runner.py +++ b/tools/testrunner/base_runner.py @@ -90,16 +90,8 @@ TEST_MAP = { ModeConfig = namedtuple( 'ModeConfig', 'label flags timeout_scalefactor status_mode') -DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap"] RELEASE_FLAGS = ["--nohard-abort"] -DEBUG_MODE = ModeConfig( - label='debug', - flags=DEBUG_FLAGS, - timeout_scalefactor=4, - status_mode="debug", -) - RELEASE_MODE = ModeConfig( label='release', flags=RELEASE_FLAGS, @@ -390,9 +382,22 @@ class BaseTestRunner(object): print(">>> Latest GN build found: %s" % latest_config) return os.path.join(DEFAULT_OUT_GN, latest_config) + def _custom_debug_mode(self): + custom_debug_flags = ["--nohard-abort"] + if self.build_config.verify_heap: + custom_debug_flags += ["--verify-heap"] + if self.build_config.slow_dchecks: + custom_debug_flags += ["--enable-slow-asserts"] + return ModeConfig( + label='debug', + flags=custom_debug_flags, + timeout_scalefactor=4, + status_mode="debug", + ) + def _process_default_options(self): if self.build_config.is_debug: - self.mode_options = DEBUG_MODE + self.mode_options = self._custom_debug_mode() elif self.build_config.dcheck_always_on: self.mode_options = TRY_RELEASE_MODE else: diff --git a/tools/testrunner/build_config.py b/tools/testrunner/build_config.py index c6e22cb30f..cdc11681f8 100644 --- a/tools/testrunner/build_config.py +++ b/tools/testrunner/build_config.py @@ -44,6 +44,8 @@ class BuildConfig(object): # TODO(machenbach): We only have ubsan not ubsan_vptr. 
self.ubsan_vptr = build_config['is_ubsan_vptr'] self.verify_csa = build_config['v8_enable_verify_csa'] + self.verify_heap = build_config['v8_enable_verify_heap'] + self.slow_dchecks = build_config['v8_enable_slow_dchecks'] self.lite_mode = build_config['v8_enable_lite_mode'] self.pointer_compression = build_config['v8_enable_pointer_compression'] self.pointer_compression_shared_cage = build_config[ @@ -152,6 +154,8 @@ class BuildConfig(object): 'third_party_heap', 'webassembly', 'dict_property_const_tracking', + 'verify_heap', + 'slow_dchecks', ] detected_options = [attr for attr in attrs if getattr(self, attr, False)] return '\n'.join(detected_options) diff --git a/tools/testrunner/testdata/testroot1/out/build/v8_build_config.json b/tools/testrunner/testdata/testroot1/out/build/v8_build_config.json index 09f85ca772..9f1743780e 100644 --- a/tools/testrunner/testdata/testroot1/out/build/v8_build_config.json +++ b/tools/testrunner/testdata/testroot1/out/build/v8_build_config.json @@ -20,6 +20,8 @@ "v8_enable_conservative_stack_scanning": false, "v8_enable_concurrent_marking": true, "v8_enable_verify_csa": false, + "v8_enable_verify_heap": false, + "v8_enable_slow_dchecks": false, "v8_enable_lite_mode": false, "v8_enable_pointer_compression": true, "v8_enable_pointer_compression_shared_cage": true, diff --git a/tools/testrunner/testdata/testroot2/out/build/v8_build_config.json b/tools/testrunner/testdata/testroot2/out/build/v8_build_config.json index 7d1eb4b0f8..a0b2cb87b4 100644 --- a/tools/testrunner/testdata/testroot2/out/build/v8_build_config.json +++ b/tools/testrunner/testdata/testroot2/out/build/v8_build_config.json @@ -20,6 +20,8 @@ "v8_enable_conservative_stack_scanning": false, "v8_enable_concurrent_marking": true, "v8_enable_verify_csa": false, + "v8_enable_verify_heap": false, + "v8_enable_slow_dchecks": false, "v8_enable_lite_mode": false, "v8_enable_pointer_compression": false, "v8_enable_pointer_compression_shared_cage": false, diff --git 
a/tools/testrunner/testdata/testroot3/out/build/v8_build_config.json b/tools/testrunner/testdata/testroot3/out/build/v8_build_config.json index 09f85ca772..9f1743780e 100644 --- a/tools/testrunner/testdata/testroot3/out/build/v8_build_config.json +++ b/tools/testrunner/testdata/testroot3/out/build/v8_build_config.json @@ -20,6 +20,8 @@ "v8_enable_conservative_stack_scanning": false, "v8_enable_concurrent_marking": true, "v8_enable_verify_csa": false, + "v8_enable_verify_heap": false, + "v8_enable_slow_dchecks": false, "v8_enable_lite_mode": false, "v8_enable_pointer_compression": true, "v8_enable_pointer_compression_shared_cage": true, diff --git a/tools/testrunner/testdata/testroot5/out.gn/build/v8_build_config.json b/tools/testrunner/testdata/testroot5/out.gn/build/v8_build_config.json index 09f85ca772..9f1743780e 100644 --- a/tools/testrunner/testdata/testroot5/out.gn/build/v8_build_config.json +++ b/tools/testrunner/testdata/testroot5/out.gn/build/v8_build_config.json @@ -20,6 +20,8 @@ "v8_enable_conservative_stack_scanning": false, "v8_enable_concurrent_marking": true, "v8_enable_verify_csa": false, + "v8_enable_verify_heap": false, + "v8_enable_slow_dchecks": false, "v8_enable_lite_mode": false, "v8_enable_pointer_compression": true, "v8_enable_pointer_compression_shared_cage": true, diff --git a/tools/testrunner/testdata/testroot6/out/build/v8_build_config.json b/tools/testrunner/testdata/testroot6/out/build/v8_build_config.json index 09f85ca772..9f1743780e 100644 --- a/tools/testrunner/testdata/testroot6/out/build/v8_build_config.json +++ b/tools/testrunner/testdata/testroot6/out/build/v8_build_config.json @@ -20,6 +20,8 @@ "v8_enable_conservative_stack_scanning": false, "v8_enable_concurrent_marking": true, "v8_enable_verify_csa": false, + "v8_enable_verify_heap": false, + "v8_enable_slow_dchecks": false, "v8_enable_lite_mode": false, "v8_enable_pointer_compression": true, "v8_enable_pointer_compression_shared_cage": true, From 
8368b035cd609acac58a7d2548393b7362931da0 Mon Sep 17 00:00:00 2001 From: Victor Gomes Date: Tue, 3 Jan 2023 13:47:25 +0100 Subject: [PATCH 124/654] [arm64] CheckSmi used in release code CheckSmi is used by Maglev in release mode, so it should not be defined inside a #ifdef V8_ENABLE_DEBUG_CODE Change-Id: I5dfe23d90fcc662fa91e541bdb8df10c5d2a4e7d Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128616 Commit-Queue: Darius Mercadier Auto-Submit: Victor Gomes Reviewed-by: Darius Mercadier Cr-Commit-Position: refs/heads/main@{#85075} --- src/codegen/arm64/macro-assembler-arm64.cc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/codegen/arm64/macro-assembler-arm64.cc b/src/codegen/arm64/macro-assembler-arm64.cc index 5f64400739..57a658a83a 100644 --- a/src/codegen/arm64/macro-assembler-arm64.cc +++ b/src/codegen/arm64/macro-assembler-arm64.cc @@ -1487,6 +1487,12 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot( TailCallOptimizedCodeSlot(this, optimized_code_entry, x4); } +Condition TurboAssembler::CheckSmi(Register object) { + static_assert(kSmiTag == 0); + Tst(object, kSmiTagMask); + return eq; +} + #ifdef V8_ENABLE_DEBUG_CODE void TurboAssembler::AssertSpAligned() { if (!v8_flags.debug_code) return; @@ -1525,12 +1531,6 @@ void TurboAssembler::AssertFPCRState(Register fpcr) { Bind(&done); } -Condition TurboAssembler::CheckSmi(Register object) { - static_assert(kSmiTag == 0); - Tst(object, kSmiTagMask); - return eq; -} - void TurboAssembler::AssertSmi(Register object, AbortReason reason) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); From a7285a717b030364c2d9ce860da690814142cce5 Mon Sep 17 00:00:00 2001 From: Clemens Backes Date: Wed, 28 Dec 2022 12:03:56 +0100 Subject: [PATCH 125/654] [wasm] Remove wasm_write_protect_code variant This mode is not used in production any more, and will be removed from the code base soon. Thus stop executing this variant on bots and remove the variant definition. 
R=machenbach@chromium.org Bug: v8:13632 Change-Id: I15ff76fa6c5b52f5287e758a80f955ffb1278261 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4127158 Reviewed-by: Michael Achenbach Commit-Queue: Clemens Backes Cr-Commit-Position: refs/heads/main@{#85076} --- infra/testing/builders.pyl | 6 ------ tools/testrunner/local/variants.py | 4 ---- 2 files changed, 10 deletions(-) diff --git a/infra/testing/builders.pyl b/infra/testing/builders.pyl index 2a330f6925..2a9a986bc2 100644 --- a/infra/testing/builders.pyl +++ b/infra/testing/builders.pyl @@ -481,8 +481,6 @@ {'name': 'mjsunit', 'variant': 'stress_maglev'}, # Experimental regexp engine. {'name': 'mjsunit', 'variant': 'experimental_regexp'}, - # Wasm write protect code space. - {'name': 'mjsunit', 'variant': 'wasm_write_protect_code'}, ], }, 'v8_linux64_gc_stress_custom_snapshot_dbg': { @@ -1475,8 +1473,6 @@ {'name': 'mjsunit', 'variant': 'stress_maglev'}, # Experimental regexp engine. {'name': 'mjsunit', 'variant': 'experimental_regexp'}, - # Wasm write protect code space. - {'name': 'mjsunit', 'variant': 'wasm_write_protect_code'}, ], }, 'V8 Linux64 - cppgc-non-default - debug': { @@ -1541,8 +1537,6 @@ {'name': 'mjsunit', 'variant': 'stress_maglev'}, # Experimental regexp engine. {'name': 'mjsunit', 'variant': 'experimental_regexp'}, - # Wasm write protect code space. - {'name': 'mjsunit', 'variant': 'wasm_write_protect_code'}, ], }, 'V8 Linux64 gcc': { diff --git a/tools/testrunner/local/variants.py b/tools/testrunner/local/variants.py index 6d8861d787..8f13c0274e 100644 --- a/tools/testrunner/local/variants.py +++ b/tools/testrunner/local/variants.py @@ -53,7 +53,6 @@ ALL_VARIANT_FLAGS = { "instruction_scheduling": [["--turbo-instruction-scheduling"]], "stress_instruction_scheduling": [["--turbo-stress-instruction-scheduling"] ], - "wasm_write_protect_code": [["--wasm-write-protect-code-memory"]], # Google3 variants. 
"google3_icu": [[]], "google3_noicu": [[]], @@ -95,9 +94,6 @@ INCOMPATIBLE_FLAGS_PER_VARIANT = { "--cache=after-execute", "--cache=full-code-cache", "--cache=none" ], "experimental_regexp": ["--no-enable-experimental-regexp-engine"], - # There is a negative implication: --perf-prof disables - # --wasm-write-protect-code-memory. - "wasm_write_protect_code": ["--perf-prof"], "assert_types": [ "--concurrent-recompilation", "--stress_concurrent_inlining", "--no-assert-types" From 1fe6f5ca925a325a4f6a6ac409fe080b54acadc2 Mon Sep 17 00:00:00 2001 From: Al Muthanna Athamina Date: Fri, 30 Dec 2022 10:43:56 +0100 Subject: [PATCH 126/654] Skip wasm/log-code-after-post-message and asm/regress-1395401 until issue is fixed Bug: v8:13545 Change-Id: Ib7b5a2c2ce79ade5835eb467cd78a85b8049866a No-Try: true Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4127960 Reviewed-by: Michael Achenbach Commit-Queue: Michael Achenbach Auto-Submit: Almothana Athamneh Cr-Commit-Position: refs/heads/main@{#85077} --- test/mjsunit/mjsunit.status | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status index b69d19df1b..21b9724c18 100644 --- a/test/mjsunit/mjsunit.status +++ b/test/mjsunit/mjsunit.status @@ -1114,6 +1114,10 @@ # BUG(v8:13379) maglev-inlining flag isn't stable enough for fuzzing. 'maglev/eager-deopt-in-inline': [SKIP], + + # BUG(v8:13545) Skipped until issue is fixed to reduce noise on alerts. 
+ 'wasm/log-code-after-post-message': [SKIP], + 'asm/regress-1395401': [SKIP], }], # gc_fuzzer or deopt_fuzzer or interrupt_fuzzer ############################################################################## From 58421a81652fd4ab40d959618d06f2e99eaf5dff Mon Sep 17 00:00:00 2001 From: Darius M Date: Tue, 3 Jan 2023 14:34:30 +0100 Subject: [PATCH 127/654] [maglev][arm64] Fix wrong scaling in StringCharCodeAt Bug: v8:7700 Change-Id: I940280221deabf49c87fdc099afb77bfce9015c4 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128531 Auto-Submit: Darius Mercadier Reviewed-by: Victor Gomes Commit-Queue: Darius Mercadier Commit-Queue: Victor Gomes Cr-Commit-Position: refs/heads/main@{#85078} --- src/maglev/arm64/maglev-assembler-arm64.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/maglev/arm64/maglev-assembler-arm64.cc b/src/maglev/arm64/maglev-assembler-arm64.cc index f30fbb5640..0363222d47 100644 --- a/src/maglev/arm64/maglev-assembler-arm64.cc +++ b/src/maglev/arm64/maglev-assembler-arm64.cc @@ -519,7 +519,7 @@ void MaglevAssembler::StringCharCodeAt(RegisterSnapshot& register_snapshot, B(result_fits_one_byte); bind(&two_byte_string); - Lsl(index, index, 2); + Lsl(index, index, 1); Add(index, index, SeqTwoByteString::kHeaderSize - kHeapObjectTag); Ldrh(result, MemOperand(string, index)); // Fallthrough. 
From c5bf7a36b6367ac297c68af6bc184d8e0289e832 Mon Sep 17 00:00:00 2001 From: Darius M Date: Tue, 3 Jan 2023 14:43:14 +0100 Subject: [PATCH 128/654] [maglev][arm64] Implement TypedArray IRs Bug: v8:7700 Change-Id: I0409743886d9321fbe6991841c0b37e4f4f6814e Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4130289 Reviewed-by: Victor Gomes Commit-Queue: Darius Mercadier Cr-Commit-Position: refs/heads/main@{#85079} --- src/maglev/arm64/maglev-assembler-arm64-inl.h | 13 ++ src/maglev/arm64/maglev-ir-arm64.cc | 133 +++++++++++++++++- src/maglev/maglev-assembler.h | 2 +- src/maglev/maglev-ir.h | 14 ++ src/maglev/x64/maglev-ir-x64.cc | 15 -- 5 files changed, 156 insertions(+), 21 deletions(-) diff --git a/src/maglev/arm64/maglev-assembler-arm64-inl.h b/src/maglev/arm64/maglev-assembler-arm64-inl.h index 1a69e352b7..b69266adb4 100644 --- a/src/maglev/arm64/maglev-assembler-arm64-inl.h +++ b/src/maglev/arm64/maglev-assembler-arm64-inl.h @@ -348,6 +348,19 @@ inline void MaglevAssembler::LoadSignedField(Register result, } } +inline void MaglevAssembler::LoadUnsignedField(Register result, + MemOperand operand, int size) { + if (size == 1) { + ldrb(result, operand); + } else if (size == 2) { + ldrh(result, operand); + } else { + DCHECK_EQ(size, 4); + DCHECK(result.IsW()); + ldr(result, operand); + } +} + inline void MaglevAssembler::StoreField(MemOperand operand, Register value, int size) { DCHECK(size == 1 || size == 2 || size == 4); diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index a2a1474869..bbfc80545f 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -160,10 +160,6 @@ void ConvertReceiver::GenerateCode(MaglevAssembler* masm, __ bind(&done); } -UNIMPLEMENTED_NODE(LoadSignedIntTypedArrayElement, elements_kind_) -UNIMPLEMENTED_NODE(LoadUnsignedIntTypedArrayElement, elements_kind_) -UNIMPLEMENTED_NODE(LoadDoubleTypedArrayElement, elements_kind_) - int 
ToObject::MaxCallStackArgs() const { using D = CallInterfaceDescriptorFor::type; return D::GetStackParameterCount(); @@ -243,7 +239,6 @@ void AssertInt32::GenerateCode(MaglevAssembler* masm, } UNIMPLEMENTED_NODE(CheckJSObjectElementsBounds) -UNIMPLEMENTED_NODE(CheckJSTypedArrayBounds, elements_kind_) UNIMPLEMENTED_NODE_WITH_CALL(JumpLoopPrologue, loop_depth_, unit_) int BuiltinStringFromCharCode::MaxCallStackArgs() const { @@ -1396,6 +1391,43 @@ void CheckedSmiTagUint32::GenerateCode(MaglevAssembler* masm, __ Assert(vc, AbortReason::kInputDoesNotFitSmi); } +void CheckJSTypedArrayBounds::SetValueLocationConstraints() { + UseRegister(receiver_input()); + if (ElementsKindSize(elements_kind_) == 1) { + UseRegister(index_input()); + } else { + UseAndClobberRegister(index_input()); + } +} +void CheckJSTypedArrayBounds::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register object = ToRegister(receiver_input()); + Register index = ToRegister(index_input()); + + if (v8_flags.debug_code) { + __ AssertNotSmi(object); + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + __ CompareObjectType(object, scratch, scratch, JS_TYPED_ARRAY_TYPE); + __ Assert(eq, AbortReason::kUnexpectedValue); + } + + UseScratchRegisterScope temps(masm); + Register byte_length = temps.AcquireX(); + __ LoadBoundedSizeFromObject(byte_length, object, + JSTypedArray::kRawByteLengthOffset); + int element_size = ElementsKindSize(elements_kind_); + if (element_size > 1) { + DCHECK(element_size == 2 || element_size == 4); + __ Cmp(byte_length, Operand(index, LSL, element_size / 2)); + } else { + __ Cmp(byte_length, index); + } + // We use {lo} which does an unsigned comparison to handle negative + // indices as well. 
+ __ EmitEagerDeoptIf(lo, DeoptimizeReason::kOutOfBounds, this); +} + void CheckJSDataViewBounds::SetValueLocationConstraints() { UseRegister(receiver_input()); UseRegister(index_input()); @@ -1711,6 +1743,97 @@ void ReduceInterruptBudget::GenerateCode(MaglevAssembler* masm, __ bind(*done); } +void LoadSignedIntTypedArrayElement::SetValueLocationConstraints() { + UseRegister(object_input()); + UseRegister(index_input()); + DefineAsRegister(this); +} +void LoadSignedIntTypedArrayElement::GenerateCode( + MaglevAssembler* masm, const ProcessingState& state) { + Register object = ToRegister(object_input()); + Register index = ToRegister(index_input()); + Register result_reg = ToRegister(result()); + Register data_pointer = result_reg; + __ AssertNotSmi(object); + if (v8_flags.debug_code) { + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + __ CompareObjectType(object, scratch, scratch, JS_TYPED_ARRAY_TYPE); + __ Assert(eq, AbortReason::kUnexpectedValue); + } + int element_size = ElementsKindSize(elements_kind_); + __ LoadExternalPointerField( + data_pointer, + FieldMemOperand(object, JSTypedArray::kExternalPointerOffset)); + __ Add(data_pointer, data_pointer, Operand(index, LSL, element_size / 2)); + __ LoadSignedField(result_reg.W(), MemOperand(data_pointer), element_size); +} + +void LoadUnsignedIntTypedArrayElement::SetValueLocationConstraints() { + UseRegister(object_input()); + UseRegister(index_input()); + DefineAsRegister(this); +} +void LoadUnsignedIntTypedArrayElement::GenerateCode( + MaglevAssembler* masm, const ProcessingState& state) { + Register object = ToRegister(object_input()); + Register index = ToRegister(index_input()); + Register result_reg = ToRegister(result()); + Register data_pointer = result_reg; + __ AssertNotSmi(object); + if (v8_flags.debug_code) { + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + __ CompareObjectType(object, scratch, scratch, JS_TYPED_ARRAY_TYPE); + __ Assert(eq, 
AbortReason::kUnexpectedValue); + } + int element_size = ElementsKindSize(elements_kind_); + __ LoadExternalPointerField( + data_pointer, + FieldMemOperand(object, JSTypedArray::kExternalPointerOffset)); + __ Add(data_pointer, data_pointer, Operand(index, LSL, element_size / 2)); + __ LoadUnsignedField(result_reg.W(), MemOperand(data_pointer), element_size); +} + +void LoadDoubleTypedArrayElement::SetValueLocationConstraints() { + UseRegister(object_input()); + UseRegister(index_input()); + DefineAsRegister(this); +} +void LoadDoubleTypedArrayElement::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register object = ToRegister(object_input()); + Register index = ToRegister(index_input()); + DoubleRegister result_reg = ToDoubleRegister(result()); + + __ AssertNotSmi(object); + if (v8_flags.debug_code) { + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + __ CompareObjectType(object, scratch, scratch, JS_TYPED_ARRAY_TYPE); + __ Assert(eq, AbortReason::kUnexpectedValue); + } + + UseScratchRegisterScope temps(masm); + Register data_pointer = temps.AcquireX(); + __ LoadExternalPointerField( + data_pointer, + FieldMemOperand(object, JSTypedArray::kExternalPointerOffset)); + switch (elements_kind_) { + case FLOAT32_ELEMENTS: + __ Add(data_pointer, data_pointer, Operand(index, LSL, 2)); + __ Ldr(result_reg.S(), Operand(data_pointer)); + __ Fcvt(result_reg, result_reg.S()); + break; + case FLOAT64_ELEMENTS: + __ Add(data_pointer, data_pointer, Operand(index, LSL, 3)); + __ Ldr(result_reg, Operand(data_pointer)); + break; + default: + UNREACHABLE(); + } +} + void LoadDoubleField::SetValueLocationConstraints() { UseRegister(object_input()); DefineAsRegister(this); diff --git a/src/maglev/maglev-assembler.h b/src/maglev/maglev-assembler.h index 688750056e..2bdc69d411 100644 --- a/src/maglev/maglev-assembler.h +++ b/src/maglev/maglev-assembler.h @@ -94,7 +94,7 @@ class MaglevAssembler : public MacroAssembler { inline void 
LoadSignedField(Register result, MemOperand operand, int element_size); - inline void LoadUnsignedField(Register result, Operand operand, + inline void LoadUnsignedField(Register result, MemOperand operand, int element_size); inline void StoreField(MemOperand operand, Register value, int element_size); inline void ReverseByteOrder(Register value, int element_size); diff --git a/src/maglev/maglev-ir.h b/src/maglev/maglev-ir.h index 04c67d12db..68063379a1 100644 --- a/src/maglev/maglev-ir.h +++ b/src/maglev/maglev-ir.h @@ -399,6 +399,7 @@ enum class ValueRepresentation : uint8_t { }; constexpr Condition ConditionFor(Operation cond); + bool FromConstantToBool(MaglevAssembler* masm, ValueNode* node); inline int ExternalArrayElementSize(const ExternalArrayType element_type) { @@ -414,6 +415,19 @@ inline int ExternalArrayElementSize(const ExternalArrayType element_type) { } } +inline int ElementsKindSize(ElementsKind element_kind) { + switch (element_kind) { +#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \ + case TYPE##_ELEMENTS: \ + DCHECK_LE(sizeof(ctype), 8); \ + return sizeof(ctype); + TYPED_ARRAYS(TYPED_ARRAY_CASE) + default: + UNREACHABLE(); +#undef TYPED_ARRAY_CASE + } +} + inline std::ostream& operator<<(std::ostream& os, const ValueRepresentation& repr) { switch (repr) { diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index 96375c6037..a61fc60c82 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -476,21 +476,6 @@ void CheckJSArrayBounds::GenerateCode(MaglevAssembler* masm, __ EmitEagerDeoptIf(above_equal, DeoptimizeReason::kOutOfBounds, this); } -namespace { -int ElementsKindSize(ElementsKind element_kind) { - switch (element_kind) { -#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \ - case TYPE##_ELEMENTS: \ - DCHECK_LE(sizeof(ctype), 8); \ - return sizeof(ctype); - TYPED_ARRAYS(TYPED_ARRAY_CASE) - default: - UNREACHABLE(); -#undef TYPED_ARRAY_CASE - } -} -} // namespace - void 
CheckJSTypedArrayBounds::SetValueLocationConstraints() { UseRegister(receiver_input()); if (ElementsKindSize(elements_kind_) == 1) { From 8a565c39d000db8bf9da0b58ab1f9d7cd26f52b1 Mon Sep 17 00:00:00 2001 From: Clemens Backes Date: Wed, 28 Dec 2022 12:05:07 +0100 Subject: [PATCH 129/654] [wasm] Remove mprotect-based code protection With lazy compilation we disabled mprotect-based code protection. We currently have no users and no test coverage of that flag. Hence remove it from the code base. R=ahaas@chromium.org Bug: v8:13632 Change-Id: I1e39499dfbdb896287901b97c32f00366449c466 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4114296 Reviewed-by: Andreas Haas Commit-Queue: Clemens Backes Cr-Commit-Position: refs/heads/main@{#85080} --- src/flags/flag-definitions.h | 8 +- src/wasm/code-space-access.cc | 53 +--- src/wasm/code-space-access.h | 21 +- src/wasm/wasm-code-manager.cc | 230 +----------------- src/wasm/wasm-code-manager.h | 41 ---- .../wasm/memory-protection-unittest.cc | 59 +---- 6 files changed, 27 insertions(+), 385 deletions(-) diff --git a/src/flags/flag-definitions.h b/src/flags/flag-definitions.h index e7ddb68c60..b795acb382 100644 --- a/src/flags/flag-definitions.h +++ b/src/flags/flag-definitions.h @@ -1024,11 +1024,8 @@ DEFINE_INT(wasm_num_compilation_tasks, 128, DEFINE_VALUE_IMPLICATION(single_threaded, wasm_num_compilation_tasks, 0) DEFINE_DEBUG_BOOL(trace_wasm_native_heap, false, "trace wasm native heap events") -DEFINE_BOOL(wasm_write_protect_code_memory, false, - "write protect code memory on the wasm native heap with mprotect") DEFINE_BOOL(wasm_memory_protection_keys, true, - "protect wasm code memory with PKU if available (takes precedence " - "over --wasm-write-protect-code-memory)") + "protect wasm code memory with PKU if available") DEFINE_DEBUG_BOOL(trace_wasm_serialization, false, "trace serialization/deserialization") DEFINE_BOOL(wasm_async_compilation, true, @@ -2247,9 +2244,6 @@ DEFINE_PERF_PROF_BOOL( 
DEFINE_NEG_IMPLICATION(perf_prof, compact_code_space) // TODO(v8:8462) Remove implication once perf supports remapping. DEFINE_NEG_IMPLICATION(perf_prof, write_protect_code_memory) -#if V8_ENABLE_WEBASSEMBLY -DEFINE_NEG_IMPLICATION(perf_prof, wasm_write_protect_code_memory) -#endif // V8_ENABLE_WEBASSEMBLY // --perf-prof-unwinding-info is available only on selected architectures. #if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \ diff --git a/src/wasm/code-space-access.cc b/src/wasm/code-space-access.cc index a9e76e2f1f..bac1002a28 100644 --- a/src/wasm/code-space-access.cc +++ b/src/wasm/code-space-access.cc @@ -12,46 +12,18 @@ namespace v8 { namespace internal { namespace wasm { -namespace { -// For PKU and if MAP_JIT is available, the CodeSpaceWriteScope does not -// actually make use of the supplied {NativeModule}. In fact, there are -// situations where we can't provide a specific {NativeModule} to the scope. For -// those situations, we use this dummy pointer instead. -NativeModule* GetDummyNativeModule() { - static struct alignas(NativeModule) DummyNativeModule { - char content; - } dummy_native_module; - return reinterpret_cast(&dummy_native_module); -} -} // namespace - -thread_local NativeModule* CodeSpaceWriteScope::current_native_module_ = - nullptr; +thread_local int CodeSpaceWriteScope::scope_depth_ = 0; // TODO(jkummerow): Background threads could permanently stay in // writable mode; only the main thread has to switch back and forth. -CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule* native_module) - : previous_native_module_(current_native_module_) { - if (!native_module) { - // Passing in a {nullptr} is OK if we don't use that pointer anyway. - // Internally, we need a non-nullptr though to know whether a scope is - // already open from looking at {current_native_module_}. 
- DCHECK(!SwitchingPerNativeModule()); - native_module = GetDummyNativeModule(); - } - if (previous_native_module_ == native_module) return; - current_native_module_ = native_module; - if (previous_native_module_ == nullptr || SwitchingPerNativeModule()) { - SetWritable(); - } +CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule* native_module) { + DCHECK_LE(0, scope_depth_); + if (++scope_depth_ == 1) SetWritable(); } CodeSpaceWriteScope::~CodeSpaceWriteScope() { - if (previous_native_module_ == current_native_module_) return; - if (previous_native_module_ == nullptr || SwitchingPerNativeModule()) { - SetExecutable(); - } - current_native_module_ = previous_native_module_; + DCHECK_LT(0, scope_depth_); + if (--scope_depth_ == 0) SetExecutable(); } #if V8_HAS_PTHREAD_JIT_WRITE_PROTECT @@ -64,17 +36,12 @@ void CodeSpaceWriteScope::SetExecutable() { RwxMemoryWriteScope::SetExecutable(); } -// static -bool CodeSpaceWriteScope::SwitchingPerNativeModule() { return false; } - #else // !V8_HAS_PTHREAD_JIT_WRITE_PROTECT // static void CodeSpaceWriteScope::SetWritable() { if (WasmCodeManager::MemoryProtectionKeysEnabled()) { RwxMemoryWriteScope::SetWritable(); - } else if (v8_flags.wasm_write_protect_code_memory) { - current_native_module_->AddWriter(); } } @@ -83,17 +50,9 @@ void CodeSpaceWriteScope::SetExecutable() { if (WasmCodeManager::MemoryProtectionKeysEnabled()) { DCHECK(v8_flags.wasm_memory_protection_keys); RwxMemoryWriteScope::SetExecutable(); - } else if (v8_flags.wasm_write_protect_code_memory) { - current_native_module_->RemoveWriter(); } } -// static -bool CodeSpaceWriteScope::SwitchingPerNativeModule() { - return !WasmCodeManager::MemoryProtectionKeysEnabled() && - v8_flags.wasm_write_protect_code_memory; -} - #endif // !V8_HAS_PTHREAD_JIT_WRITE_PROTECT } // namespace wasm diff --git a/src/wasm/code-space-access.h b/src/wasm/code-space-access.h index 659361612c..fd02e428dc 100644 --- a/src/wasm/code-space-access.h +++ b/src/wasm/code-space-access.h @@ 
-49,27 +49,16 @@ class V8_NODISCARD CodeSpaceWriteScope final { CodeSpaceWriteScope(const CodeSpaceWriteScope&) = delete; CodeSpaceWriteScope& operator=(const CodeSpaceWriteScope&) = delete; - static bool IsInScope() { return current_native_module_ != nullptr; } + static bool IsInScope() { + DCHECK_LE(0, scope_depth_); + return scope_depth_ != 0; + } private: - // The M1 implementation knows implicitly from the {MAP_JIT} flag during - // allocation which region to switch permissions for. On non-M1 hardware - // without memory protection key support, we need the code space from the - // {NativeModule}. - static thread_local NativeModule* current_native_module_; + static thread_local int scope_depth_; - // {SetWritable} and {SetExecutable} implicitly operate on - // {current_native_module_} (for mprotect-based protection). static void SetWritable(); static void SetExecutable(); - - // Returns {true} if switching permissions happens on a per-module level, and - // not globally (like for MAP_JIT and PKU). - static bool SwitchingPerNativeModule(); - - // Save the previous module to put it back in {current_native_module_} when - // exiting this scope. 
- NativeModule* const previous_native_module_; }; } // namespace v8::internal::wasm diff --git a/src/wasm/wasm-code-manager.cc b/src/wasm/wasm-code-manager.cc index 015f76929a..9a70ffa5cd 100644 --- a/src/wasm/wasm-code-manager.cc +++ b/src/wasm/wasm-code-manager.cc @@ -516,10 +516,7 @@ int WasmCode::GetSourcePositionBefore(int offset) { } WasmCodeAllocator::WasmCodeAllocator(std::shared_ptr async_counters) - : protect_code_memory_(!V8_HAS_PTHREAD_JIT_WRITE_PROTECT && - v8_flags.wasm_write_protect_code_memory && - !WasmCodeManager::MemoryProtectionKeysEnabled()), - async_counters_(std::move(async_counters)) { + : async_counters_(std::move(async_counters)) { owned_code_space_.reserve(4); } @@ -638,67 +635,6 @@ size_t ReservationSize(size_t code_size_estimate, int num_declared_functions, return reserve_size; } -#ifdef DEBUG -// Check postconditions when returning from this method: -// 1) {region} must be fully contained in {writable_memory_}; -// 2) {writable_memory_} must be a maximally merged ordered set of disjoint -// non-empty regions. -class CheckWritableMemoryRegions { - public: - CheckWritableMemoryRegions( - std::set& - writable_memory, - base::AddressRegion new_region, size_t& new_writable_memory) - : writable_memory_(writable_memory), - new_region_(new_region), - new_writable_memory_(new_writable_memory), - old_writable_size_(std::accumulate( - writable_memory_.begin(), writable_memory_.end(), size_t{0}, - [](size_t old, base::AddressRegion region) { - return old + region.size(); - })) {} - - ~CheckWritableMemoryRegions() { - // {new_region} must be contained in {writable_memory_}. - DCHECK(std::any_of( - writable_memory_.begin(), writable_memory_.end(), - [this](auto region) { return region.contains(new_region_); })); - - // The new total size of writable memory must have increased by - // {new_writable_memory}. 
- size_t total_writable_size = std::accumulate( - writable_memory_.begin(), writable_memory_.end(), size_t{0}, - [](size_t old, auto region) { return old + region.size(); }); - DCHECK_EQ(old_writable_size_ + new_writable_memory_, total_writable_size); - - // There are no empty regions. - DCHECK(std::none_of(writable_memory_.begin(), writable_memory_.end(), - [](auto region) { return region.is_empty(); })); - - // Regions are sorted and disjoint. (std::accumulate has nodiscard on msvc - // so USE is required to prevent build failures in debug builds). - USE(std::accumulate(writable_memory_.begin(), writable_memory_.end(), - Address{0}, [](Address previous_end, auto region) { - DCHECK_LT(previous_end, region.begin()); - return region.end(); - })); - } - - private: - const std::set& - writable_memory_; - const base::AddressRegion new_region_; - const size_t& new_writable_memory_; - const size_t old_writable_size_; -}; -#else // !DEBUG -class CheckWritableMemoryRegions { - public: - template - explicit CheckWritableMemoryRegions(Args...) {} -}; -#endif // !DEBUG - // Sentinel value to be used for {AllocateForCodeInRegion} for specifying no // restriction on the region to allocate in. constexpr base::AddressRegion kUnrestrictedRegion{ @@ -755,9 +691,6 @@ base::Vector WasmCodeAllocator::AllocateForCodeInRegion( } const Address commit_page_size = CommitPageSize(); Address commit_start = RoundUp(code_space.begin(), commit_page_size); - if (commit_start != code_space.begin()) { - MakeWritable({commit_start - commit_page_size, commit_page_size}); - } Address commit_end = RoundUp(code_space.end(), commit_page_size); // {commit_start} will be either code_space.start or the start of the next @@ -777,11 +710,6 @@ base::Vector WasmCodeAllocator::AllocateForCodeInRegion( // Committed code cannot grow bigger than maximum code space size. 
DCHECK_LE(committed_code_space_.load(), v8_flags.wasm_max_committed_code_mb * MB); - if (protect_code_memory_) { - DCHECK_LT(0, writers_count_); - InsertIntoWritableRegions({commit_start, commit_end - commit_start}, - false); - } } DCHECK(IsAligned(code_space.begin(), kCodeAlignment)); generated_code_size_.fetch_add(code_space.size(), std::memory_order_relaxed); @@ -791,52 +719,6 @@ base::Vector WasmCodeAllocator::AllocateForCodeInRegion( return {reinterpret_cast(code_space.begin()), code_space.size()}; } -// TODO(dlehmann): Ensure that {AddWriter()} is always paired up with a -// {RemoveWriter}, such that eventually the code space is write protected. -// One solution is to make the API foolproof by hiding {SetWritable()} and -// allowing change of permissions only through {CodeSpaceWriteScope}. -// TODO(dlehmann): Add tests that ensure the code space is eventually write- -// protected. -void WasmCodeAllocator::AddWriter() { - DCHECK(protect_code_memory_); - ++writers_count_; -} - -void WasmCodeAllocator::RemoveWriter() { - DCHECK(protect_code_memory_); - DCHECK_GT(writers_count_, 0); - if (--writers_count_ > 0) return; - - // Switch all memory to non-writable. - v8::PageAllocator* page_allocator = GetPlatformPageAllocator(); - for (base::AddressRegion writable : writable_memory_) { - for (base::AddressRegion split_range : - SplitRangeByReservationsIfNeeded(writable, owned_code_space_)) { - TRACE_HEAP("Set 0x%" V8PRIxPTR ":0x%" V8PRIxPTR " to RX\n", - split_range.begin(), split_range.end()); - CHECK(SetPermissions(page_allocator, split_range.begin(), - split_range.size(), PageAllocator::kReadExecute)); - } - } - writable_memory_.clear(); -} - -void WasmCodeAllocator::MakeWritable(base::AddressRegion region) { - if (!protect_code_memory_) return; - DCHECK_LT(0, writers_count_); - DCHECK(!region.is_empty()); - v8::PageAllocator* page_allocator = GetPlatformPageAllocator(); - - // Align to commit page size. 
- size_t commit_page_size = page_allocator->CommitPageSize(); - DCHECK(base::bits::IsPowerOfTwo(commit_page_size)); - Address begin = RoundDown(region.begin(), commit_page_size); - Address end = RoundUp(region.end(), commit_page_size); - region = base::AddressRegion(begin, end - begin); - - InsertIntoWritableRegions(region, true); -} - void WasmCodeAllocator::FreeCode(base::Vector codes) { // Zap code area and collect freed code regions. DisjointAllocationPool freed_regions; @@ -881,84 +763,6 @@ size_t WasmCodeAllocator::GetNumCodeSpaces() const { return owned_code_space_.size(); } -void WasmCodeAllocator::InsertIntoWritableRegions(base::AddressRegion region, - bool switch_to_writable) { - size_t new_writable_memory = 0; - - CheckWritableMemoryRegions check_on_return{writable_memory_, region, - new_writable_memory}; - - v8::PageAllocator* page_allocator = GetPlatformPageAllocator(); - // Subroutine to make a non-writable region writable (if {switch_to_writable} - // is {true}) and insert it into {writable_memory_}. - auto make_writable = [&](decltype(writable_memory_)::iterator insert_pos, - base::AddressRegion region) { - new_writable_memory += region.size(); - if (switch_to_writable) { - for (base::AddressRegion split_range : - SplitRangeByReservationsIfNeeded(region, owned_code_space_)) { - TRACE_HEAP("Set 0x%" V8PRIxPTR ":0x%" V8PRIxPTR " to RWX\n", - split_range.begin(), split_range.end()); - CHECK(SetPermissions(page_allocator, split_range.begin(), - split_range.size(), - PageAllocator::kReadWriteExecute)); - } - } - - // Insert {region} into {writable_memory_} before {insert_pos}, potentially - // merging it with the surrounding regions. 
- if (insert_pos != writable_memory_.begin()) { - auto previous = insert_pos; - --previous; - if (previous->end() == region.begin()) { - region = {previous->begin(), previous->size() + region.size()}; - writable_memory_.erase(previous); - } - } - if (insert_pos != writable_memory_.end() && - region.end() == insert_pos->begin()) { - region = {region.begin(), insert_pos->size() + region.size()}; - insert_pos = writable_memory_.erase(insert_pos); - } - writable_memory_.insert(insert_pos, region); - }; - - DCHECK(!region.is_empty()); - // Find a possible insertion position by identifying the first region whose - // start address is not less than that of {new_region}, and the starting the - // merge from the existing region before that. - auto it = writable_memory_.lower_bound(region); - if (it != writable_memory_.begin()) --it; - for (;; ++it) { - if (it == writable_memory_.end() || it->begin() >= region.end()) { - // No overlap; add before {it}. - make_writable(it, region); - return; - } - if (it->end() <= region.begin()) continue; // Continue after {it}. - base::AddressRegion overlap = it->GetOverlap(region); - DCHECK(!overlap.is_empty()); - if (overlap.begin() == region.begin()) { - if (overlap.end() == region.end()) return; // Fully contained already. - // Remove overlap (which is already writable) and continue. - region = {overlap.end(), region.end() - overlap.end()}; - continue; - } - if (overlap.end() == region.end()) { - // Remove overlap (which is already writable), then make the remaining - // region writable. - region = {region.begin(), overlap.begin() - region.begin()}; - make_writable(it, region); - return; - } - // Split {region}, make the split writable, and continue with the rest. 
- base::AddressRegion split = {region.begin(), - overlap.begin() - region.begin()}; - make_writable(it, split); - region = {overlap.end(), region.end() - overlap.end()}; - } -} - namespace { BoundsCheckStrategy GetBoundsChecks(const WasmModule* module) { if (!v8_flags.wasm_bounds_checks) return kNoBoundsChecks; @@ -1558,22 +1362,6 @@ void NativeModule::PatchJumpTableLocked(const CodeSpaceData& code_space_data, DCHECK_NOT_NULL(code_space_data.jump_table); DCHECK_NOT_NULL(code_space_data.far_jump_table); - // Jump tables are often allocated next to each other, so we can switch - // permissions on both at the same time. - if (code_space_data.jump_table->instructions().end() == - code_space_data.far_jump_table->instructions().begin()) { - base::Vector jump_tables_space = base::VectorOf( - code_space_data.jump_table->instructions().begin(), - code_space_data.jump_table->instructions().size() + - code_space_data.far_jump_table->instructions().size()); - code_allocator_.MakeWritable(AddressRegionOf(jump_tables_space)); - } else { - code_allocator_.MakeWritable( - AddressRegionOf(code_space_data.jump_table->instructions())); - code_allocator_.MakeWritable( - AddressRegionOf(code_space_data.far_jump_table->instructions())); - } - DCHECK_LT(slot_index, module_->num_declared_functions); Address jump_table_slot = code_space_data.jump_table->instruction_start() + @@ -1955,23 +1743,13 @@ void WasmCodeManager::Commit(base::AddressRegion region) { break; } } - // Even when we employ W^X with v8_flags.wasm_write_protect_code_memory == - // true, code pages need to be initially allocated with RWX permission because - // of concurrent compilation/execution. For this reason there is no - // distinction here based on v8_flags.wasm_write_protect_code_memory. - // TODO(dlehmann): This allocates initially as writable and executable, and - // as such is not safe-by-default. 
In particular, if - // {WasmCodeAllocator::SetWritable(false)} is never called afterwards (e.g., - // because no {CodeSpaceWriteScope} is created), the writable permission is - // never withdrawn. - // One potential fix is to allocate initially with kReadExecute only, which - // forces all compilation threads to add the missing {CodeSpaceWriteScope}s - // before modification; and/or adding DCHECKs that {CodeSpaceWriteScope} is - // open when calling this method. + // Allocate with RWX permissions; this will be restricted via PKU if + // available and enabled. PageAllocator::Permission permission = PageAllocator::kReadWriteExecute; bool success = false; if (MemoryProtectionKeysEnabled()) { + DCHECK(CodeSpaceWriteScope::IsInScope()); #if V8_HAS_PKU_JIT_WRITE_PROTECT TRACE_HEAP( "Setting rwx permissions and memory protection key for 0x%" PRIxPTR diff --git a/src/wasm/wasm-code-manager.h b/src/wasm/wasm-code-manager.h index f207ca096c..3b63e61b4d 100644 --- a/src/wasm/wasm-code-manager.h +++ b/src/wasm/wasm-code-manager.h @@ -546,20 +546,6 @@ class WasmCodeAllocator { base::Vector AllocateForCodeInRegion(NativeModule*, size_t size, base::AddressRegion); - // Increases or decreases the {writers_count_} field. While there is at least - // one writer, it is allowed to call {MakeWritable} to make regions writable. - // When the last writer is removed, all code is switched back to - // write-protected. - // Hold the {NativeModule}'s {allocation_mutex_} when calling one of these - // methods. The methods should only be called via {CodeSpaceWriteScope}. - V8_EXPORT_PRIVATE void AddWriter(); - V8_EXPORT_PRIVATE void RemoveWriter(); - - // Make a code region writable. Only allowed if there is at lease one writer - // (see above). - // Hold the {NativeModule}'s {allocation_mutex_} when calling this method. - V8_EXPORT_PRIVATE void MakeWritable(base::AddressRegion); - // Free memory pages of all given code objects. Used for wasm code GC. 
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method. void FreeCode(base::Vector); @@ -571,9 +557,6 @@ class WasmCodeAllocator { Counters* counters() const { return async_counters_.get(); } private: - void InsertIntoWritableRegions(base::AddressRegion region, - bool switch_to_writable); - ////////////////////////////////////////////////////////////////////////////// // These fields are protected by the mutex in {NativeModule}. @@ -585,18 +568,9 @@ class WasmCodeAllocator { DisjointAllocationPool freed_code_space_; std::vector owned_code_space_; - // The following two fields are only used if {protect_code_memory_} is true. - int writers_count_{0}; - std::set - writable_memory_; - // End of fields protected by {mutex_}. ////////////////////////////////////////////////////////////////////////////// - // {protect_code_memory_} is true if traditional memory permission switching - // is used to protect code space. It is false if {MAP_JIT} on Mac or PKU is - // being used, or protection is completely disabled. - const bool protect_code_memory_; std::atomic committed_code_space_{0}; std::atomic generated_code_size_{0}; std::atomic freed_code_size_{0}; @@ -701,21 +675,6 @@ class V8_EXPORT_PRIVATE NativeModule final { // to a function index. uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const; - void AddWriter() { - base::RecursiveMutexGuard guard{&allocation_mutex_}; - code_allocator_.AddWriter(); - } - - void RemoveWriter() { - base::RecursiveMutexGuard guard{&allocation_mutex_}; - code_allocator_.RemoveWriter(); - } - - void MakeWritable(base::AddressRegion region) { - base::RecursiveMutexGuard guard{&allocation_mutex_}; - code_allocator_.MakeWritable(region); - } - // For cctests, where we build both WasmModule and the runtime objects // on the fly, and bypass the instance builder pipeline. 
void ReserveCodeTableForTesting(uint32_t max_functions); diff --git a/test/unittests/wasm/memory-protection-unittest.cc b/test/unittests/wasm/memory-protection-unittest.cc index 0d02549b6e..120197bba0 100644 --- a/test/unittests/wasm/memory-protection-unittest.cc +++ b/test/unittests/wasm/memory-protection-unittest.cc @@ -21,15 +21,11 @@ #include "test/unittests/test-utils.h" #include "testing/gmock/include/gmock/gmock-matchers.h" -namespace v8 { -namespace internal { -namespace wasm { +namespace v8::internal::wasm { enum MemoryProtectionMode { kNoProtection, kPku, - kMprotect, - kPkuWithMprotectFallback }; const char* MemoryProtectionModeToString(MemoryProtectionMode mode) { @@ -38,10 +34,6 @@ const char* MemoryProtectionModeToString(MemoryProtectionMode mode) { return "NoProtection"; case kPku: return "Pku"; - case kMprotect: - return "Mprotect"; - case kPkuWithMprotectFallback: - return "PkuWithMprotectFallback"; } } @@ -50,15 +42,10 @@ class MemoryProtectionTest : public TestWithNativeContext { void Initialize(MemoryProtectionMode mode) { v8_flags.wasm_lazy_compilation = false; mode_ = mode; - bool enable_pku = mode == kPku || mode == kPkuWithMprotectFallback; - v8_flags.wasm_memory_protection_keys = enable_pku; + v8_flags.wasm_memory_protection_keys = (mode == kPku); // The key is initially write-protected. 
CHECK_IMPLIES(WasmCodeManager::HasMemoryProtectionKeySupport(), !WasmCodeManager::MemoryProtectionKeyWritable()); - - bool enable_mprotect = - mode == kMprotect || mode == kPkuWithMprotectFallback; - v8_flags.wasm_write_protect_code_memory = enable_mprotect; } void CompileModule() { @@ -72,11 +59,7 @@ class MemoryProtectionTest : public TestWithNativeContext { WasmCode* code() const { return code_; } bool code_is_protected() { - return V8_HAS_PTHREAD_JIT_WRITE_PROTECT || uses_pku() || uses_mprotect(); - } - - void MakeCodeWritable() { - native_module_->MakeWritable(base::AddressRegionOf(code_->instructions())); + return V8_HAS_PTHREAD_JIT_WRITE_PROTECT || uses_pku(); } void WriteToCode() { code_->instructions()[0] = 0; } @@ -87,28 +70,18 @@ class MemoryProtectionTest : public TestWithNativeContext { WriteToCode(); return; } - // Tier-up might be running and unprotecting the code region temporarily (if - // using mprotect). In that case, repeatedly write to the code region to - // make us eventually crash. ASSERT_DEATH_IF_SUPPORTED( - do { + { WriteToCode(); base::OS::Sleep(base::TimeDelta::FromMilliseconds(10)); - } while (uses_mprotect()), + }, ""); } - bool uses_mprotect() { - // M1 always uses MAP_JIT. - if (V8_HAS_PTHREAD_JIT_WRITE_PROTECT) return false; - return mode_ == kMprotect || - (mode_ == kPkuWithMprotectFallback && !uses_pku()); - } - bool uses_pku() { // M1 always uses MAP_JIT. 
if (V8_HAS_PTHREAD_JIT_WRITE_PROTECT) return false; - bool param_has_pku = mode_ == kPku || mode_ == kPkuWithMprotectFallback; + bool param_has_pku = mode_ == kPku; return param_has_pku && WasmCodeManager::HasMemoryProtectionKeySupport(); } @@ -157,8 +130,7 @@ std::string PrintMemoryProtectionTestParam( } INSTANTIATE_TEST_SUITE_P(MemoryProtection, ParameterizedMemoryProtectionTest, - ::testing::Values(kNoProtection, kPku, kMprotect, - kPkuWithMprotectFallback), + ::testing::Values(kNoProtection, kPku), PrintMemoryProtectionTestParam); TEST_P(ParameterizedMemoryProtectionTest, CodeNotWritableAfterCompilation) { @@ -169,7 +141,6 @@ TEST_P(ParameterizedMemoryProtectionTest, CodeNotWritableAfterCompilation) { TEST_P(ParameterizedMemoryProtectionTest, CodeWritableWithinScope) { CompileModule(); CodeSpaceWriteScope write_scope(native_module()); - MakeCodeWritable(); WriteToCode(); } @@ -177,7 +148,6 @@ TEST_P(ParameterizedMemoryProtectionTest, CodeNotWritableAfterScope) { CompileModule(); { CodeSpaceWriteScope write_scope(native_module()); - MakeCodeWritable(); WriteToCode(); } AssertCodeEventuallyProtected(); @@ -264,8 +234,7 @@ std::string PrintMemoryProtectionAndSignalHandlingTestParam( INSTANTIATE_TEST_SUITE_P( MemoryProtection, ParameterizedMemoryProtectionTestWithSignalHandling, - ::testing::Combine(::testing::Values(kNoProtection, kPku, kMprotect, - kPkuWithMprotectFallback), + ::testing::Combine(::testing::Values(kNoProtection, kPku), ::testing::Bool(), ::testing::Bool()), PrintMemoryProtectionAndSignalHandlingTestParam); @@ -303,16 +272,12 @@ TEST_P(ParameterizedMemoryProtectionTestWithSignalHandling, TestSignalHandler) { // second parameter, and not a matcher as {ASSERT_DEATH}. #if GTEST_HAS_DEATH_TEST ASSERT_DEATH( - // The signal handler should crash, but it might "accidentally" - // succeed if tier-up is running in the background and using mprotect - // to unprotect the code for the whole process. 
In that case we - // repeatedly send the signal until we crash. - do { + { base::Optional write_scope; if (open_write_scope) write_scope.emplace(native_module()); pthread_kill(pthread_self(), SIGPROF); base::OS::Sleep(base::TimeDelta::FromMilliseconds(10)); - } while (uses_mprotect()), // Only loop for mprotect. + }, // Check that the subprocess tried to write, but did not succeed. ::testing::AnyOf( // non-sanitizer builds: @@ -340,6 +305,4 @@ TEST_P(ParameterizedMemoryProtectionTestWithSignalHandling, TestSignalHandler) { } #endif // V8_OS_POSIX && !V8_OS_FUCHSIA -} // namespace wasm -} // namespace internal -} // namespace v8 +} // namespace v8::internal::wasm From 8d54971115287f663708ed2825511678fa92ccfb Mon Sep 17 00:00:00 2001 From: pthier Date: Tue, 3 Jan 2023 17:36:38 +0100 Subject: [PATCH 130/654] [test] Fix cctest/test-strings/Regress1402187 for non sandbox builds Increase length of strings in the test to ensure they are cachable external strings even when the sandbox is disabled. Change-Id: I1228e1abb1d88c0bb70edaeb718e1bf2f4cdd53d Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4127228 Reviewed-by: Victor Gomes Auto-Submit: Patrick Thier Commit-Queue: Patrick Thier Cr-Commit-Position: refs/heads/main@{#85081} --- test/cctest/test-strings.cc | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc index 1f12175879..c291d3da5b 100644 --- a/test/cctest/test-strings.cc +++ b/test/cctest/test-strings.cc @@ -1398,16 +1398,17 @@ TEST(Regress1402187) { i::Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); // This won't leak; the external string mechanism will call Dispose() on it. 
- const char ext_string_content[] = "prop-1234567"; - OneByteVectorResource* resource = new OneByteVectorResource( - v8::base::Vector(ext_string_content, 12)); + const char ext_string_content[] = "prop-1234567890asdf"; + OneByteVectorResource* resource = + new OneByteVectorResource(v8::base::Vector( + ext_string_content, strlen(ext_string_content))); const uint32_t fake_hash = String::CreateHashFieldValue(4711, String::HashFieldType::kHash); { v8::HandleScope scope(CcTest::isolate()); // Internalize a string with the same hash to ensure collision. Handle intern = isolate->factory()->NewStringFromAsciiChecked( - "internalized", AllocationType::kOld); + "internalized1234567", AllocationType::kOld); intern->set_raw_hash_field(fake_hash); factory->InternalizeName(intern); CHECK(intern->IsInternalizedString()); @@ -1418,6 +1419,7 @@ TEST(Regress1402187) { Handle string = v8::Utils::OpenHandle(*ext_string); string->set_raw_hash_field(fake_hash); CHECK(string->IsExternalString()); + CHECK(!StringShape(*string).IsUncachedExternal()); CHECK(!string->IsInternalizedString()); CHECK(!String::Equals(isolate, string, intern)); CHECK_EQ(string->hash(), intern->hash()); From f0989ad16811947dd6e2e2e13cb02868559b047c Mon Sep 17 00:00:00 2001 From: Victor Gomes Date: Tue, 3 Jan 2023 17:02:18 +0100 Subject: [PATCH 131/654] [maglev][arm64] Use Cmp instead of cmp Cmp can deal with large immediates (in particular Smi::kMaxValue). 
Bug: v8:7700 Change-Id: I4dedb6c52f263f626f924c0465acbd5a250b7fd5 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4127227 Auto-Submit: Victor Gomes Commit-Queue: Darius Mercadier Reviewed-by: Darius Mercadier Cr-Commit-Position: refs/heads/main@{#85082} --- src/maglev/arm64/maglev-ir-arm64.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index bbfc80545f..20ebd60523 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -938,7 +938,7 @@ void Int32MultiplyWithOverflow::GenerateCode(MaglevAssembler* masm, UseScratchRegisterScope temps(masm); Register temp = temps.AcquireW(); __ orr(temp, left, right); - __ cmp(temp, Immediate(0)); + __ Cmp(temp, Immediate(0)); // If one of them is negative, we must have a -0 result, which is non-int32, // so deopt. // TODO(leszeks): Consider splitting these deopts to have distinct deopt @@ -1092,7 +1092,7 @@ void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm, __ neg(lhs, lhs); __ udiv(res, lhs, rhs); __ msub(out, res, rhs, lhs); - __ cmp(out, Immediate(0)); + __ Cmp(out, Immediate(0)); // TODO(victorgomes): This ideally should be kMinusZero, but Maglev // only allows one deopt reason per IR. 
__ EmitEagerDeoptIf(eq, deopt_reason, node); @@ -1520,7 +1520,7 @@ void UnsafeSmiTag::GenerateCode(MaglevAssembler* masm, if (v8_flags.debug_code) { if (input().node()->properties().value_representation() == ValueRepresentation::kUint32) { - __ cmp(reg, Immediate(Smi::kMaxValue)); + __ Cmp(reg, Immediate(Smi::kMaxValue)); __ Check(ls, AbortReason::kInputDoesNotFitSmi); } } From a934b72483b6407f6a4e4de1df8d66af3056d79b Mon Sep 17 00:00:00 2001 From: Shu-yu Guo Date: Tue, 3 Jan 2023 10:49:29 -0800 Subject: [PATCH 132/654] [string-iswellformed] Fix isWellFormed for indirect strings Bug: chromium:1403546, v8:13557 Change-Id: Ifb96207022eef451f10cdba92519e97e452d884e Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4131800 Reviewed-by: Adam Klein Commit-Queue: Shu-yu Guo Cr-Commit-Position: refs/heads/main@{#85083} --- src/objects/string-inl.h | 5 +-- src/runtime/runtime-strings.cc | 5 +-- .../string-iswellformed-flat-indirect.js | 35 +++++++++++++++++++ 3 files changed, 41 insertions(+), 4 deletions(-) create mode 100644 test/mjsunit/harmony/string-iswellformed-flat-indirect.js diff --git a/src/objects/string-inl.h b/src/objects/string-inl.h index efbdd12e00..46e89cfc2c 100644 --- a/src/objects/string-inl.h +++ b/src/objects/string-inl.h @@ -966,9 +966,10 @@ bool String::IsWellFormedUnicode(Isolate* isolate, Handle string) { // InstanceType. 
See // https://docs.google.com/document/d/15f-1c_Ysw3lvjy_Gx0SmmD9qeO8UuXuAbWIpWCnTDO8/ string = Flatten(isolate, string); - DCHECK(string->IsTwoByteRepresentation()); DisallowGarbageCollection no_gc; - const uint16_t* data = string->template GetChars(isolate, no_gc); + String::FlatContent flat = string->GetFlatContent(no_gc); + DCHECK(flat.IsFlat()); + const uint16_t* data = flat.ToUC16Vector().begin(); return !unibrow::Utf16::HasUnpairedSurrogate(data, string->length()); } diff --git a/src/runtime/runtime-strings.cc b/src/runtime/runtime-strings.cc index 189cc4c28c..e451c5ebb8 100644 --- a/src/runtime/runtime-strings.cc +++ b/src/runtime/runtime-strings.cc @@ -490,8 +490,9 @@ RUNTIME_FUNCTION(Runtime_StringToWellFormed) { Handle dest = isolate->factory()->NewRawTwoByteString(length).ToHandleChecked(); DisallowGarbageCollection no_gc; - const uint16_t* source_data = - source->template GetChars(isolate, no_gc); + String::FlatContent source_contents = source->GetFlatContent(no_gc); + DCHECK(source_contents.IsFlat()); + const uint16_t* source_data = source_contents.ToUC16Vector().begin(); uint16_t* dest_data = dest->GetChars(no_gc); unibrow::Utf16::ReplaceUnpairedSurrogates(source_data, dest_data, length); return *dest; diff --git a/test/mjsunit/harmony/string-iswellformed-flat-indirect.js b/test/mjsunit/harmony/string-iswellformed-flat-indirect.js new file mode 100644 index 0000000000..282944f821 --- /dev/null +++ b/test/mjsunit/harmony/string-iswellformed-flat-indirect.js @@ -0,0 +1,35 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --harmony-string-is-well-formed + +(function TestSliced() { + // toString on the function returns a sliced string on the script source, + // which is 2-byte. 
+ const fooString = foo.toString(); + assertTrue(fooString.isWellFormed()); + assertEquals(fooString, fooString.toWellFormed()); +})(); + +function TestCons(a, b) { + const s = a + b; + // Flatten it before calling isWellFormed to get a flat cons. + s.endsWith('a'); + assertTrue(s.isWellFormed()); + assertEquals(s, s.toWellFormed()); +} +TestCons('�', '�'); + +function TestThin(a, b) { + const s = a + b; + const o = {}; + o[s]; + assertTrue(s.isWellFormed()); + assertEquals(s, s.toWellFormed()); +} +TestThin('�', '�'); + +function foo() {} +// Make this source file 2-byte. +'�'; From bf19099f3ad95bc8250c3c66b3a9ea4134c42662 Mon Sep 17 00:00:00 2001 From: JianxiaoLuIntel Date: Wed, 21 Dec 2022 11:06:01 +0800 Subject: [PATCH 133/654] Reland "[turbofan] Simplifying (x+k1)==k2 into x==(k2-k1)" This is a reland of commit e9333ebd3cfbb27af8d6f61bdd32de5b242f4a35 Fix UB int overflow Original change's description: > [turbofan] Simplifying (x+k1)==k2 into x==(k2-k1) > > > Change-Id: I234da79e1f53fa0fc15494fe6d31742d4e6eea97 > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4055393 > Commit-Queue: Jianxiao Lu > Reviewed-by: Tobias Tebbi > Cr-Commit-Position: refs/heads/main@{#84947} Change-Id: Ib79fd496147f5c7f33846168908c36ebb5229208 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4119767 Commit-Queue: Jianxiao Lu Reviewed-by: Tobias Tebbi Cr-Commit-Position: refs/heads/main@{#85084} --- src/compiler/machine-operator-reducer.cc | 29 +++++++ .../machine-operator-reducer-unittest.cc | 75 +++++++++++++++++++ 2 files changed, 104 insertions(+) diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc index 5b21a1d652..9253c06fce 100644 --- a/src/compiler/machine-operator-reducer.cc +++ b/src/compiler/machine-operator-reducer.cc @@ -5,6 +5,7 @@ #include "src/compiler/machine-operator-reducer.h" #include +#include #include #include "src/base/bits.h" @@ -2551,6 +2552,20 @@ Reduction 
MachineOperatorReducer::ReduceWord32Equal(Node* node) { node->ReplaceInput(1, Uint32Constant(replacements->second)); return Changed(node); } + + // Simplifying (x+k1)==k2 into x==k2-k1. + if (m.left().IsInt32Add() && m.right().IsInt32Constant()) { + Int32AddMatcher m_add(m.left().node()); + if (m_add.right().IsInt32Constant()) { + int32_t lte_right = m.right().ResolvedValue(); + int32_t add_right = m_add.right().ResolvedValue(); + // No need to consider overflow in this condition (==). + node->ReplaceInput(0, m_add.left().node()); + node->ReplaceInput(1, Int32Constant(static_cast(lte_right) - + static_cast(add_right))); + return Changed(node); + } + } } return NoChange(); @@ -2579,6 +2594,20 @@ Reduction MachineOperatorReducer::ReduceWord64Equal(Node* node) { return Changed(node); } + // Simplifying (x+k1)==k2 into x==k2-k1. + if (m.left().IsInt64Add() && m.right().IsInt64Constant()) { + Int64AddMatcher m_add(m.left().node()); + if (m_add.right().IsInt64Constant()) { + int64_t lte_right = m.right().ResolvedValue(); + int64_t add_right = m_add.right().ResolvedValue(); + // No need to consider overflow in this condition (==). 
+ node->ReplaceInput(0, m_add.left().node()); + node->ReplaceInput(1, Int64Constant(static_cast(lte_right) - + static_cast(add_right))); + return Changed(node); + } + } + /* If Int64Constant(c) can be casted from an Int32Constant: ------------------------------------------------- diff --git a/test/unittests/compiler/machine-operator-reducer-unittest.cc b/test/unittests/compiler/machine-operator-reducer-unittest.cc index 9fa005ff72..669f941148 100644 --- a/test/unittests/compiler/machine-operator-reducer-unittest.cc +++ b/test/unittests/compiler/machine-operator-reducer-unittest.cc @@ -4,6 +4,7 @@ #include "src/compiler/machine-operator-reducer.h" +#include #include #include "src/base/bits.h" @@ -11,6 +12,7 @@ #include "src/base/ieee754.h" #include "src/base/overflowing-math.h" #include "src/builtins/builtins.h" +#include "src/common/globals.h" #include "src/compiler/js-graph.h" #include "src/compiler/machine-operator.h" #include "src/numbers/conversions-inl.h" @@ -1398,6 +1400,21 @@ TEST_F(MachineOperatorReducerTest, } } +TEST_F(MachineOperatorReducerTest, Word32EqualWithAddAndConstant) { + // (x+k1)==k2 => x==(k2-k1) + Node* const p0 = Parameter(0); + TRACED_FOREACH(int32_t, k1, kInt32Values) { + TRACED_FOREACH(int32_t, k2, kInt32Values) { + Node* node = graph()->NewNode( + machine()->Word32Equal(), + graph()->NewNode(machine()->Int32Add(), p0, Int32Constant(k1)), + Int32Constant(k2)); + Reduction r = Reduce(node); + ASSERT_TRUE(r.Changed()); + } + } +} + // ----------------------------------------------------------------------------- // Word64Equal @@ -1436,6 +1453,21 @@ TEST_F(MachineOperatorReducerTest, } } +TEST_F(MachineOperatorReducerTest, Word64EqualWithAddAndConstant) { + // (x+k1)==k2 => x==(k2-k1) + Node* const p0 = Parameter(0); + TRACED_FOREACH(int64_t, k1, kInt64Values) { + TRACED_FOREACH(int64_t, k2, kInt64Values) { + Node* node = graph()->NewNode( + machine()->Word64Equal(), + graph()->NewNode(machine()->Int64Add(), p0, Int64Constant(k1)), + 
Int64Constant(k2)); + Reduction r = Reduce(node); + ASSERT_TRUE(r.Changed()); + } + } +} + // ----------------------------------------------------------------------------- // Branch @@ -2584,6 +2616,49 @@ TEST_F(MachineOperatorReducerTest, Uint64LessThanWithUint32Reduction) { } } +TEST_F(MachineOperatorReducerTest, Uint64LessThanWithInt64AddDontReduce) { + Node* const p0 = Parameter(0); + + TRACED_FOREACH(uint64_t, k1, kUint64Values) { + TRACED_FOREACH(uint64_t, k2, kUint64Values) { + Node* node = graph()->NewNode( + machine()->Uint64LessThan(), + graph()->NewNode(machine()->Int64Add(), p0, Int64Constant(k1)), + Int64Constant(k2)); + Reduction r = Reduce(node); + // Don't reduce because of potential overflow + ASSERT_FALSE(r.Changed()); + } + } +} + +TEST_F(MachineOperatorReducerTest, + Uint64LessThanOrEqualWithInt64AddDontReduce) { + Node* const p0 = Parameter(0); + + TRACED_FOREACH(uint64_t, k1, kUint64Values) { + TRACED_FOREACH(uint64_t, k2, kUint64Values) { + uint64_t k1 = 0; + uint64_t k2 = 18446744073709551615u; + Node* node = graph()->NewNode( + machine()->Uint64LessThanOrEqual(), + graph()->NewNode(machine()->Int64Add(), p0, Int64Constant(k1)), + Int64Constant(k2)); + Reduction r = Reduce(node); + if (k2 == 0) { + // x <= 0 => x == 0 + ASSERT_TRUE(r.Changed()); + } else if (k2 == std::numeric_limits::max()) { + // x <= Max => true + ASSERT_TRUE(r.Changed()); + } else { + // Don't reduce because of potential overflow + ASSERT_FALSE(r.Changed()); + } + } + } +} + // ----------------------------------------------------------------------------- // Int64LessThan From dba5c526af85d3c05ad8a43d0822d337aebf82c4 Mon Sep 17 00:00:00 2001 From: Liu Yu Date: Tue, 3 Jan 2023 10:44:17 +0800 Subject: [PATCH 134/654] [loong64][mips64][wasm] Fix printing of wasm-to-js frames Port commit e17eee4894be67f715a7b2d7f17d8b69724f1cf8 Change-Id: I7a4f68706f9691647f2dc47e8534e3cb356dd945 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128274 Commit-Queue: Zhao 
Jiazhong Auto-Submit: Liu Yu Reviewed-by: Zhao Jiazhong Cr-Commit-Position: refs/heads/main@{#85085} --- src/compiler/backend/loong64/code-generator-loong64.cc | 4 ++++ src/compiler/backend/mips64/code-generator-mips64.cc | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/src/compiler/backend/loong64/code-generator-loong64.cc b/src/compiler/backend/loong64/code-generator-loong64.cc index 3a56c96e92..30ec1522d6 100644 --- a/src/compiler/backend/loong64/code-generator-loong64.cc +++ b/src/compiler/backend/loong64/code-generator-loong64.cc @@ -2242,6 +2242,10 @@ void CodeGenerator::AssembleConstructFrame() { if (call_descriptor->IsWasmFunctionCall() || call_descriptor->IsWasmImportWrapper() || call_descriptor->IsWasmCapiFunction()) { + // For import wrappers and C-API functions, this stack slot is only used + // for printing stack traces in V8. Also, it holds a WasmApiFunctionRef + // instead of the instance itself, which is taken care of in the frames + // accessors. __ Push(kWasmInstanceRegister); } if (call_descriptor->IsWasmCapiFunction()) { diff --git a/src/compiler/backend/mips64/code-generator-mips64.cc b/src/compiler/backend/mips64/code-generator-mips64.cc index a85ce470d9..97fabbd1cc 100644 --- a/src/compiler/backend/mips64/code-generator-mips64.cc +++ b/src/compiler/backend/mips64/code-generator-mips64.cc @@ -4189,6 +4189,10 @@ void CodeGenerator::AssembleConstructFrame() { if (call_descriptor->IsWasmFunctionCall() || call_descriptor->IsWasmImportWrapper() || call_descriptor->IsWasmCapiFunction()) { + // For import wrappers and C-API functions, this stack slot is only used + // for printing stack traces in V8. Also, it holds a WasmApiFunctionRef + // instead of the instance itself, which is taken care of in the frames + // accessors. 
__ Push(kWasmInstanceRegister); } if (call_descriptor->IsWasmCapiFunction()) { From 419a1c716c44c6418f2451382260234577697b7d Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Tue, 3 Jan 2023 19:11:48 -0800 Subject: [PATCH 135/654] Update V8 DEPS (trusted) Rolling v8/base/trace_event/common: https://chromium.googlesource.com/chromium/src/base/trace_event/common/+log/521ac34..68e6038 Rolling v8/build: https://chromium.googlesource.com/chromium/src/build/+log/5129278..44b5138 Rolling v8/third_party/depot_tools: https://chromium.googlesource.com/chromium/tools/depot_tools/+log/5b0c934..252b198 Rolling v8/third_party/fuchsia-sdk/sdk: version:11.20230102.2.1..version:11.20230103.1.1 Change-Id: I01351584aeabfa611ab7e2c2b161b49bae753348 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4133310 Bot-Commit: v8-ci-autoroll-builder Commit-Queue: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#85086} --- DEPS | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/DEPS b/DEPS index 445ebcac13..a8257c66b2 100644 --- a/DEPS +++ b/DEPS @@ -65,7 +65,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. 
- 'fuchsia_version': 'version:11.20230102.2.1', + 'fuchsia_version': 'version:11.20230103.1.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -103,9 +103,9 @@ vars = { deps = { 'base/trace_event/common': - Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556', + Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '68e6038b5350cba18c341cc7c572170af5c5b20c', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '51292789ecae63df94fa2058407cdc7c7b886b6c', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '44b5138cf5012ca0e661db0ff0f723757ff6b2d6', 'buildtools': Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'cf8d11e41138ca777f3eaa09df41bf968c8be6ba', 'buildtools/clang_format/script': @@ -217,7 +217,7 @@ deps = { 'condition': 'checkout_android', }, 'third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '5b0c93402623ee632077ae073555867a0f984fc6', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '252b19866a6a9f3de069363184e5fca72280e558', 'third_party/fuchsia-sdk/sdk': { 'packages': [ { From 166fd2f38f44dec856367eda95bd3bdcc4b4644f Mon Sep 17 00:00:00 2001 From: gengjiawen Date: Sat, 31 Dec 2022 22:55:24 -0800 Subject: [PATCH 136/654] [cppgc]: Fix build on msvc Fixes compilation with msvc 2019 toolchain. 
See: nodejs/node#37330 (comment) Bug: v8:12661 Change-Id: I7cfd87a3dd531a2e4913d82b743fb8ecdfdb5ed8 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3533019 Reviewed-by: Michael Lippautz Commit-Queue: Michael Lippautz Cr-Commit-Position: refs/heads/main@{#85087} --- include/v8-cppgc.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/include/v8-cppgc.h b/include/v8-cppgc.h index 139af8fdac..4a457027c9 100644 --- a/include/v8-cppgc.h +++ b/include/v8-cppgc.h @@ -77,6 +77,15 @@ struct WrapperDescriptor final { }; struct V8_EXPORT CppHeapCreateParams { + CppHeapCreateParams( + std::vector> custom_spaces, + WrapperDescriptor wrapper_descriptor) + : custom_spaces(std::move(custom_spaces)), + wrapper_descriptor(wrapper_descriptor) {} + + CppHeapCreateParams(const CppHeapCreateParams&) = delete; + CppHeapCreateParams& operator=(const CppHeapCreateParams&) = delete; + std::vector> custom_spaces; WrapperDescriptor wrapper_descriptor; /** From da8ef354e8ff1640f7361898a62f5c44f591950f Mon Sep 17 00:00:00 2001 From: Nico Hartmann Date: Wed, 4 Jan 2023 12:13:22 +0100 Subject: [PATCH 137/654] [turboshaft] Implement typing of remaining FloatBinop operations Bug: v8:12783 Change-Id: I7a5bed4e349c8ced519469602716b132fe702aa0 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4061357 Reviewed-by: Darius Mercadier Commit-Queue: Nico Hartmann Cr-Commit-Position: refs/heads/main@{#85088} --- .../turboshaft/assert-types-reducer.h | 1 + .../turboshaft/type-inference-reducer.h | 342 ++++++++++++++---- test/mjsunit/turboshaft/type-inference.js | 38 +- 3 files changed, 313 insertions(+), 68 deletions(-) diff --git a/src/compiler/turboshaft/assert-types-reducer.h b/src/compiler/turboshaft/assert-types-reducer.h index 62b3c9ed0b..7a59addcb3 100644 --- a/src/compiler/turboshaft/assert-types-reducer.h +++ b/src/compiler/turboshaft/assert-types-reducer.h @@ -60,6 +60,7 @@ class AssertTypesReducer : public Next { Type type = TypeOf(index); if 
(type.IsInvalid()) return index; // For now allow Type::Any(). + // TODO(nicohartmann@): Remove this once all operations are supported. if (type.IsAny()) return index; DetectReentranceScope reentrance_scope(&emitting_asserts_); diff --git a/src/compiler/turboshaft/type-inference-reducer.h b/src/compiler/turboshaft/type-inference-reducer.h index 9d0a28749a..435c2abd58 100644 --- a/src/compiler/turboshaft/type-inference-reducer.h +++ b/src/compiler/turboshaft/type-inference-reducer.h @@ -264,6 +264,25 @@ struct FloatOperationTyper { elements, maybe_nan ? type_t::kNaN : type_t::kNoSpecialValues, zone); } + static bool IsIntegerSet(const type_t& t) { + if (!t.is_set()) return false; + int size = t.set_size(); + DCHECK_LT(0, size); + + float_t unused_ipart; + float_t min = t.set_element(0); + if (std::modf(min, &unused_ipart) != 0.0) return false; + if (min == -std::numeric_limits::infinity()) return false; + float_t max = t.set_element(size - 1); + if (std::modf(max, &unused_ipart) != 0.0) return false; + if (max == std::numeric_limits::infinity()) return false; + + for (int i = 1; i < size - 1; ++i) { + if (std::modf(t.set_element(i), &unused_ipart) != 0.0) return false; + } + return true; + } + // Tries to construct the product of two sets where values are generated using // {combine}. Returns Type::Invalid() if a set cannot be constructed (e.g. // because the result exceeds the maximal number of set elements). @@ -360,6 +379,206 @@ struct FloatOperationTyper { return Range(result_min, result_max, maybe_nan, zone); } + static Type Multiply(const type_t& l, const type_t& r, Zone* zone) { + if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN(); + bool maybe_nan = l.has_nan() || r.has_nan(); + + // If both sides are decently small sets, we produce the product set. 
+ auto combine = [](float_t a, float_t b) { return a * b; }; + if (l.is_set() && r.is_set()) { + auto result = ProductSet(l, r, maybe_nan, zone, combine); + if (!result.IsInvalid()) return result; + } + + // Otherwise just construct a range. + auto [l_min, l_max] = l.minmax(); + auto [r_min, r_max] = r.minmax(); + + std::array results; + results[0] = l_min * r_min; + results[1] = l_min * r_max; + results[2] = l_max * r_min; + results[3] = l_max * r_max; + + for (int i = 0; i < 4; ++i) { + if (std::isnan(results[i])) { + return type_t::Any(); + } + } + + if (((l_min == -inf || l_max == inf) && (r_min <= 0.0 && 0.0 <= r_max)) || + ((r_min == -inf || r_max == inf) && (l_min <= 0.0 && 0.0 <= l_max))) { + maybe_nan = true; + } + + const float_t result_min = array_min(results); + const float_t result_max = array_max(results); + type_t type = Range(result_min, result_max, maybe_nan, zone); + DCHECK_IMPLIES( + result_min <= 0.0 && 0.0 <= result_max && (l_min < 0.0 || r_min < 0.0), + type.Contains(-0.0)); + return type; + } + + static Type Divide(const type_t& l, const type_t& r, Zone* zone) { + if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN(); + bool maybe_nan = l.has_nan() || r.has_nan(); + + // If both sides are decently small sets, we produce the product set. + auto combine = [](float_t a, float_t b) { + if (b == 0) return nan_v; + return a / b; + }; + if (l.is_set() && r.is_set()) { + auto result = ProductSet(l, r, maybe_nan, zone, combine); + if (!result.IsInvalid()) return result; + } + + // Otherwise try to construct a range. + auto [l_min, l_max] = l.minmax(); + auto [r_min, r_max] = r.minmax(); + + maybe_nan = + maybe_nan || (l.Contains(0) && r.Contains(0)) || + ((l_min == -inf || l_max == inf) && (r_min == -inf || r_max == inf)); + + // If the divisor spans across 0, we give up on a precise type. + if (std::signbit(r_min) != std::signbit(r_max)) { + return type_t::Any(maybe_nan ? 
type_t::kNaN : type_t::kNoSpecialValues); + } + + // If divisor includes 0, we can try to at least infer sign of the result. + if (r.Contains(0)) { + DCHECK_EQ(r_min, 0); + if (l_max < 0) { + // All values are negative. + return Range(-inf, next_smaller(float_t{0}), + maybe_nan ? type_t::kNaN : type_t::kNoSpecialValues, zone); + } + if (r_min >= 0) { + // All values are positive. + return Range(0, inf, + maybe_nan ? type_t::kNaN : type_t::kNoSpecialValues, zone); + } + return type_t::Any(maybe_nan ? type_t::kNaN : type_t::kNoSpecialValues); + } + + std::array results; + results[0] = l_min / r_min; + results[1] = l_min / r_max; + results[2] = l_max / r_min; + results[3] = l_max / r_max; + + const float_t result_min = array_min(results); + const float_t result_max = array_max(results); + return Range(result_min, result_max, + maybe_nan ? type_t::kNaN : type_t::kNoSpecialValues, zone); + } + + static Type Modulus(const type_t& l, const type_t& r, Zone* zone) { + if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN(); + + bool maybe_nan = + l.has_nan() || r.has_nan() || l.Contains(-inf) || l.Contains(inf); + if (r.Contains(0)) { + if (r.IsSubtypeOf(type_t::Set({0}, type_t::kNaN, zone))) { + // If rhs contains nothing but 0 and NaN, the result will always be NaN. + return type_t::NaN(); + } + maybe_nan = true; + } + + // For integer inputs {l} and {r} we can infer a precise type. + if (IsIntegerSet(l) && IsIntegerSet(r)) { + auto [l_min, l_max] = l.minmax(); + auto [r_min, r_max] = r.minmax(); + auto l_abs = std::max(std::abs(l_min), std::abs(l_max)); + auto r_abs = std::max(std::abs(r_min), std::abs(r_max)) - 1; + auto abs = std::min(l_abs, r_abs); + float_t min = 0.0, max = 0.0; + if (l_min >= 0.0) { + // {l} positive. + max = abs; + } else if (l_max <= 0.0) { + // {l} negative. + min = 0.0 - abs; + } else { + // {l} positive or negative. 
+ min = 0.0 - abs; + max = abs; + } + if (min == max) return Set({min}, maybe_nan, zone); + return Range(min, max, maybe_nan, zone); + } + + return type_t::Any(maybe_nan ? type_t::kNaN : type_t::kNoSpecialValues); + } + + static Type Min(const type_t& l, const type_t& r, Zone* zone) { + if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN(); + bool maybe_nan = l.has_nan() || r.has_nan(); + + // If both sides are decently small sets, we produce the product set. + auto combine = [](float_t a, float_t b) { return std::min(a, b); }; + if (l.is_set() && r.is_set()) { + // TODO(nicohartmann@): There is a faster way to compute this set. + auto result = ProductSet(l, r, maybe_nan, zone, combine); + if (!result.IsInvalid()) return result; + } + + // Otherwise just construct a range. + auto [l_min, l_max] = l.minmax(); + auto [r_min, r_max] = r.minmax(); + + auto min = std::min(l_min, r_min); + auto max = std::min(l_max, r_max); + return Range(min, max, maybe_nan, zone); + } + + static Type Max(const type_t& l, const type_t& r, Zone* zone) { + if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN(); + bool maybe_nan = l.has_nan() || r.has_nan(); + + // If both sides are decently small sets, we produce the product set. + auto combine = [](float_t a, float_t b) { return std::max(a, b); }; + if (l.is_set() && r.is_set()) { + // TODO(nicohartmann@): There is a faster way to compute this set. + auto result = ProductSet(l, r, maybe_nan, zone, combine); + if (!result.IsInvalid()) return result; + } + + // Otherwise just construct a range. 
+ auto [l_min, l_max] = l.minmax(); + auto [r_min, r_max] = r.minmax(); + + auto min = std::max(l_min, r_min); + auto max = std::max(l_max, r_max); + return Range(min, max, maybe_nan, zone); + } + + static Type Power(const type_t& l, const type_t& r, Zone* zone) { + if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN(); + bool maybe_nan = l.has_nan() || r.has_nan(); + + // If both sides are decently small sets, we produce the product set. + auto combine = [](float_t a, float_t b) { return std::pow(a, b); }; + if (l.is_set() && r.is_set()) { + auto result = ProductSet(l, r, maybe_nan, zone, combine); + if (!result.IsInvalid()) return result; + } + + // a ** b produces NaN if a < 0 && b is fraction + if (l.min() <= 0.0 && !IsIntegerSet(r)) maybe_nan = true; + + // TODO(nicohartmann@): Maybe we can produce a more precise range here. + return type_t::Any(maybe_nan ? type_t::kNaN : 0); + } + + static Type Atan2(const type_t& l, const type_t& r, Zone* zone) { + // TODO(nicohartmann@): Maybe we can produce a more precise range her.e + return type_t::Any(type_t::kNaN); + } + // Computes the ranges to which the sides of the comparison (lhs < rhs) can be // restricted when the comparison is true. 
When the comparison is true, we // learn: lhs cannot be >= rhs.max and rhs cannot be <= lhs.min and neither @@ -523,53 +742,40 @@ class Typer { return WordOperationTyper<64>::Subtract(l, r, zone); } - static Type TypeFloat32Add(const Type& lhs, const Type& rhs, Zone* zone) { - if (lhs.IsNone() || rhs.IsNone()) return Type::None(); - if (!InputIs(lhs, Type::Kind::kFloat32) || - !InputIs(rhs, Type::Kind::kFloat32)) { - return Float32Type::Any(); - } - const auto& l = lhs.AsFloat32(); - const auto& r = rhs.AsFloat32(); - - return FloatOperationTyper<32>::Add(l, r, zone); +#define FLOAT_BINOP(op, bits, float_typer_handler) \ + static Type TypeFloat##bits##op(const Type& lhs, const Type& rhs, \ + Zone* zone) { \ + if (lhs.IsNone() || rhs.IsNone()) return Type::None(); \ + if (!InputIs(lhs, Type::Kind::kFloat##bits) || \ + !InputIs(rhs, Type::Kind::kFloat##bits)) { \ + return Float##bits##Type::Any(); \ + } \ + const auto& l = lhs.AsFloat##bits(); \ + const auto& r = rhs.AsFloat##bits(); \ + return FloatOperationTyper::float_typer_handler(l, r, zone); \ } - static Type TypeFloat32Sub(const Type& lhs, const Type& rhs, Zone* zone) { - if (lhs.IsNone() || rhs.IsNone()) return Type::None(); - if (!InputIs(lhs, Type::Kind::kFloat32) || - !InputIs(rhs, Type::Kind::kFloat32)) { - return Float32Type::Any(); - } - const auto& l = lhs.AsFloat32(); - const auto& r = rhs.AsFloat32(); - - return FloatOperationTyper<32>::Subtract(l, r, zone); - } - - static Type TypeFloat64Add(const Type& lhs, const Type& rhs, Zone* zone) { - if (lhs.IsNone() || rhs.IsNone()) return Type::None(); - if (!InputIs(lhs, Type::Kind::kFloat64) || - !InputIs(rhs, Type::Kind::kFloat64)) { - return Float64Type::Any(); - } - const auto& l = lhs.AsFloat64(); - const auto& r = rhs.AsFloat64(); - - return FloatOperationTyper<64>::Add(l, r, zone); - } - - static Type TypeFloat64Sub(const Type& lhs, const Type& rhs, Zone* zone) { - if (lhs.IsNone() || rhs.IsNone()) return Type::None(); - if (!InputIs(lhs, 
Type::Kind::kFloat64) || - !InputIs(rhs, Type::Kind::kFloat64)) { - return Float64Type::Any(); - } - const auto& l = lhs.AsFloat64(); - const auto& r = rhs.AsFloat64(); - - return FloatOperationTyper<64>::Subtract(l, r, zone); - } + // Float32 operations + FLOAT_BINOP(Add, 32, Add) + FLOAT_BINOP(Sub, 32, Subtract) + FLOAT_BINOP(Mul, 32, Multiply) + FLOAT_BINOP(Div, 32, Divide) + FLOAT_BINOP(Mod, 32, Modulus) + FLOAT_BINOP(Min, 32, Min) + FLOAT_BINOP(Max, 32, Max) + FLOAT_BINOP(Power, 32, Power) + FLOAT_BINOP(Atan2, 32, Atan2) + // Float64 operations + FLOAT_BINOP(Add, 64, Add) + FLOAT_BINOP(Sub, 64, Subtract) + FLOAT_BINOP(Mul, 64, Multiply) + FLOAT_BINOP(Div, 64, Divide) + FLOAT_BINOP(Mod, 64, Modulus) + FLOAT_BINOP(Min, 64, Min) + FLOAT_BINOP(Max, 64, Max) + FLOAT_BINOP(Power, 64, Power) + FLOAT_BINOP(Atan2, 64, Atan2) +#undef FLOAT_BINOP static Word64Type ExtendWord32ToWord64(const Word32Type& t, Zone* zone) { // We cannot infer much, but the lower bound of the word32 is also the lower @@ -948,38 +1154,40 @@ class TypeInferenceReducer : public Next { Type result_type = Type::Invalid(); Type left_type = GetType(left); Type right_type = GetType(right); + Zone* zone = Asm().graph_zone(); if (!left_type.IsInvalid() && !right_type.IsInvalid()) { +#define CASE(op, bits) \ + case FloatBinopOp::Kind::k##op: \ + result_type = Typer::TypeFloat##bits##op(left_type, right_type, zone); \ + break; if (rep == FloatRepresentation::Float32()) { switch (kind) { - case FloatBinopOp::Kind::kAdd: - result_type = Typer::TypeFloat32Add(left_type, right_type, - Asm().graph_zone()); - break; - case FloatBinopOp::Kind::kSub: - result_type = Typer::TypeFloat32Sub(left_type, right_type, - Asm().graph_zone()); - break; - default: - // TODO(nicohartmann@): Support remaining {kind}s. 
- break; + CASE(Add, 32) + CASE(Sub, 32) + CASE(Mul, 32) + CASE(Div, 32) + CASE(Mod, 32) + CASE(Min, 32) + CASE(Max, 32) + CASE(Power, 32) + CASE(Atan2, 32) } } else { DCHECK_EQ(rep, FloatRepresentation::Float64()); switch (kind) { - case FloatBinopOp::Kind::kAdd: - result_type = Typer::TypeFloat64Add(left_type, right_type, - Asm().graph_zone()); - break; - case FloatBinopOp::Kind::kSub: - result_type = Typer::TypeFloat64Sub(left_type, right_type, - Asm().graph_zone()); - break; - default: - // TODO(nicohartmann@): Support remaining {kind}s. - break; + CASE(Add, 64) + CASE(Sub, 64) + CASE(Mul, 64) + CASE(Div, 64) + CASE(Mod, 64) + CASE(Min, 64) + CASE(Max, 64) + CASE(Power, 64) + CASE(Atan2, 64) } } +#undef CASE } SetType(index, result_type); diff --git a/test/mjsunit/turboshaft/type-inference.js b/test/mjsunit/turboshaft/type-inference.js index 6da87c2535..4fc18493ce 100644 --- a/test/mjsunit/turboshaft/type-inference.js +++ b/test/mjsunit/turboshaft/type-inference.js @@ -33,7 +33,43 @@ function add2(x) { return %CheckTurboshaftTypeOf(result, "Float64{5.0}"); } -let targets = [ add1, add2 ]; +function mul2(x) { + let a = x ? 3.5 : 7.0; + let r = -1.0; + if (a < 5.0) r = a * 5.0; + else r = a * 2.5; + let result = r - 0.5; + return result; +} + +function div2(x) { + let a = x ? 3.3 : 6.6; + let r = -1.0; + if (a < 5.0) r = a / 1.1; + else r = a / 2.2; + let result = r - 0.5; + return %CheckTypeOf(result, "Float64[2.49999,2.50001]"); +} + +//function min2(x) { +// let a = x ? 3.3 : 6.6; +// let r = -1.0; +// if (a < 5.0) r = Math.min(a, 6.6); +// else r = Math.min(3.3, a); +// let result = r - 0.3; +// return %CheckTypeOf(result, "Float64{3}"); +//} +// +//function max2(x) { +// let a = x ? 
3.3 : 6.6; +// let r = -1.0; +// if (a < 5.0) r = Math.max(a, 6.6); +// else r = Math.max(3.3, a); +// let result = r - 0.6; +// return %CheckTypeOf(result, "Float64{6}"); +//} + +let targets = [ constants, add1, add2, mul2, div2, min2, max2 ]; for(let f of targets) { %PrepareFunctionForOptimization(f); f(true); From 5b1929a8f011521c89ba663efde3c757b2336e14 Mon Sep 17 00:00:00 2001 From: Michael Lippautz Date: Wed, 4 Jan 2023 12:27:26 +0100 Subject: [PATCH 138/654] [heap] Fix accounting of used bytes in CppHeap Bug: chromium:1404804, v8:13207 Change-Id: I352c3be0125c4344b613474757a900eb0114ff5f Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4135878 Auto-Submit: Michael Lippautz Reviewed-by: Anton Bikineev Commit-Queue: Michael Lippautz Commit-Queue: Anton Bikineev Cr-Commit-Position: refs/heads/main@{#85089} --- src/heap/cppgc-js/cpp-heap.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/heap/cppgc-js/cpp-heap.cc b/src/heap/cppgc-js/cpp-heap.cc index f30734b198..7b7473f17f 100644 --- a/src/heap/cppgc-js/cpp-heap.cc +++ b/src/heap/cppgc-js/cpp-heap.cc @@ -885,9 +885,11 @@ void CppHeap::ReportBufferedAllocationSizeIfPossible() { if (bytes_to_report < 0) { DCHECK_GE(used_size_.load(std::memory_order_relaxed), bytes_to_report); - used_size_.fetch_sub(bytes_to_report, std::memory_order_relaxed); + used_size_.fetch_sub(static_cast(-bytes_to_report), + std::memory_order_relaxed); } else { - used_size_.fetch_add(bytes_to_report, std::memory_order_relaxed); + used_size_.fetch_add(static_cast(bytes_to_report), + std::memory_order_relaxed); allocated_size_ += bytes_to_report; if (v8_flags.global_gc_scheduling && v8_flags.incremental_marking) { From c7450a8f3d0146f51d04bf1c3641c5eb402eac4e Mon Sep 17 00:00:00 2001 From: Jakob Linke Date: Wed, 4 Jan 2023 11:36:16 +0100 Subject: [PATCH 139/654] [testrunner] Allow rules with negated build variables in variants.py .. 
since many of these rules are actually relevant when some build variable is *not* set. Instead of defining an artificial "no_foo" variable in addition to "foo", allow definition of rules on a negative build variable condition, e.g.: "!is_debug": [...] This new syntax will be used extensively in a followup CL. Bug: v8:13629,v8:10577 Change-Id: I5ad432e71249b50d15047930e3f9143e872716d8 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4134247 Reviewed-by: Tobias Tebbi Auto-Submit: Jakob Linke Commit-Queue: Tobias Tebbi Cr-Commit-Position: refs/heads/main@{#85090} --- tools/testrunner/local/variants.py | 5 +++++ tools/testrunner/objects/testcase.py | 15 +++++++++++---- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/tools/testrunner/local/variants.py b/tools/testrunner/local/variants.py index 8f13c0274e..31da71cf6d 100644 --- a/tools/testrunner/local/variants.py +++ b/tools/testrunner/local/variants.py @@ -105,6 +105,11 @@ INCOMPATIBLE_FLAGS_PER_VARIANT = { # in _get_statusfile_variables in base_runner.py. # The conflicts might be directly contradictory flags or be caused by the # implications defined in flag-definitions.h. +# The keys of the following map support negation through '!', e.g. rule +# +# "!code_comments": [...] +# +# applies when the code_comments build variable is NOT set. INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE = { "lite_mode": ["--no-lazy-feedback-allocation", "--max-semi-space-size=*", "--stress-concurrent-inlining"] diff --git a/tools/testrunner/objects/testcase.py b/tools/testrunner/objects/testcase.py index 73ee9ff5e4..dfd25132c8 100644 --- a/tools/testrunner/objects/testcase.py +++ b/tools/testrunner/objects/testcase.py @@ -251,10 +251,17 @@ class TestCase(object): # Contradiction: flags specified through the "Flags:" annotation are # incompatible with the build. 
for variable, incompatible_flags in INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE.items(): - if self.suite.statusfile.variables[variable]: - check_flags( - incompatible_flags, file_specific_flags, - "INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE[\"" + variable + "\"]") + if variable.startswith("!"): + # `variable` is negated, apply the rule if the build variable is NOT set. + if not self.suite.statusfile.variables[variable[1:]]: + check_flags( + incompatible_flags, file_specific_flags, + "INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE[\"" + variable + "\"]") + else: + if self.suite.statusfile.variables[variable]: + check_flags( + incompatible_flags, file_specific_flags, + "INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE[\"" + variable + "\"]") # Contradiction: flags passed through --extra-flags are incompatible. for extra_flag, incompatible_flags in INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG.items(): From f0254afaab04039d7430f8e6c18671144dd65643 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Samuel=20Gro=C3=9F?= Date: Wed, 4 Jan 2023 13:13:12 +0100 Subject: [PATCH 140/654] [sandbox] Initialize EPT evacuation entries atomically MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, evacuation entries are initialized non-atomically as they will only be accessed during sweeping. However, it can happen that another thread attempts (but fails) to allocate the same table entry, causing a memory read from the same entry. If that happens, TSan will complain about a data race. Using an atomic store avoids this. 
Bug: chromium:1370743 Change-Id: Idaa5548494d4b1660ee5a798966dd09bf4b3d55c Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4135880 Commit-Queue: Samuel Groß Reviewed-by: Michael Lippautz Cr-Commit-Position: refs/heads/main@{#85091} --- src/sandbox/external-pointer-table-inl.h | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/sandbox/external-pointer-table-inl.h b/src/sandbox/external-pointer-table-inl.h index 6b8694b2ba..8af920dde2 100644 --- a/src/sandbox/external-pointer-table-inl.h +++ b/src/sandbox/external-pointer-table-inl.h @@ -176,9 +176,12 @@ void ExternalPointerTable::Mark(ExternalPointerHandle handle, if (new_handle) { DCHECK_LT(HandleToIndex(new_handle), current_start_of_evacuation_area); uint32_t index = HandleToIndex(new_handle); - // No need for an atomic store as the entry will only be accessed during - // sweeping. - Store(index, Entry::MakeEvacuationEntry(handle_location)); + // Even though the new entry will only be accessed during sweeping, this + // still needs to be an atomic write as another thread may attempt (and + // fail) to allocate the same table entry, thereby causing a read from + // this memory location. Without an atomic store here, TSan would then + // complain about a data race. + RelaxedStore(index, Entry::MakeEvacuationEntry(handle_location)); #ifdef DEBUG // Mark the handle as visited in debug builds to detect double // initialization of external pointer fields. 
From bc4bac3877a569f29912e0791f1cdb588597be6f Mon Sep 17 00:00:00 2001 From: Milad Fa Date: Tue, 3 Jan 2023 14:35:19 -0500 Subject: [PATCH 141/654] PPC[liftoff]: Implement Simd128 Construct Change-Id: Iad47ca2c3d4918957aea3896d500d4aaa4ffa13d Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4133305 Reviewed-by: Junliang Yan Commit-Queue: Milad Farazmand Cr-Commit-Position: refs/heads/main@{#85092} --- src/wasm/baseline/ppc/liftoff-assembler-ppc.h | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/src/wasm/baseline/ppc/liftoff-assembler-ppc.h index c18aae82ce..7c101e7d36 100644 --- a/src/wasm/baseline/ppc/liftoff-assembler-ppc.h +++ b/src/wasm/baseline/ppc/liftoff-assembler-ppc.h @@ -2692,7 +2692,11 @@ void LiftoffStackSlots::Construct(int param_slots) { break; } case kS128: { - asm_->bailout(kSimd, "LiftoffStackSlots::Construct"); + asm_->AllocateStackSpace(stack_decrement - kSimd128Size); + asm_->LoadSimd128(kScratchSimd128Reg, + liftoff::GetStackSlot(slot.src_offset_), r0); + asm_->AddS64(sp, sp, Operand(-kSimd128Size)); + asm_->StoreSimd128(kScratchSimd128Reg, MemOperand(sp), r0); break; } default: @@ -2720,7 +2724,8 @@ void LiftoffStackSlots::Construct(int param_slots) { asm_->StoreF64(src.reg().fp(), MemOperand(sp), r0); break; case kS128: { - asm_->bailout(kSimd, "LiftoffStackSlots::Construct"); + asm_->AddS64(sp, sp, Operand(-kSimd128Size), r0); + asm_->StoreSimd128(src.reg().fp().toSimd(), MemOperand(sp), r0); break; } default: From 22ef44b6553d8207aac5766c5fd7624a1e41fbc4 Mon Sep 17 00:00:00 2001 From: yangwenming Date: Wed, 4 Jan 2023 22:16:07 +0800 Subject: [PATCH 142/654] [cppgc] check on a valid shared_ptr. This CL fixes calling CHECK_NULL on a moved shared_ptr. 
Bug: v8:13589 Change-Id: I52ab261df7e995f4a9fcfd7a2a3c2c0012a4c94f Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4135701 Reviewed-by: Michael Lippautz Commit-Queue: Michael Lippautz Cr-Commit-Position: refs/heads/main@{#85093} --- src/heap/cppgc/heap-base.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/heap/cppgc/heap-base.cc b/src/heap/cppgc/heap-base.cc index d7402158ac..33b68b5367 100644 --- a/src/heap/cppgc/heap-base.cc +++ b/src/heap/cppgc/heap-base.cc @@ -101,7 +101,7 @@ class PlatformWithPageAllocator final : public cppgc::Platform { page_allocator_(GetGlobalPageAllocator()) { // This platform wrapper should only be used if the platform doesn't provide // a `PageAllocator`. - CHECK_NULL(delegate->GetPageAllocator()); + CHECK_NULL(delegate_->GetPageAllocator()); } ~PlatformWithPageAllocator() override = default; From 63134966fda12fb4dfb51413d545aa0ea7c67db5 Mon Sep 17 00:00:00 2001 From: Qifan Pan Date: Wed, 4 Jan 2023 14:16:52 +0100 Subject: [PATCH 143/654] [turbofan] Fix a bug of SignedBigInt64 in representation changer The expected behavior of the optimized code is deoptimizing when using a BigInt as an index and throwing an error (from CheckedTaggedToInt64). The representation changer tries to insert conversions for this case where - The output node is represented in Word64 (SignedBigInt64) - The use info is CheckedSigned64AsWord64 The representation changer first rematerializes the output node to TaggedPointer because the type check is not BigInt. Then it falls wrongly to the branch where the output representation is TaggedPointer, the output type is SignedBigInt64 in GetWord64RepresentationFor. 
Bug: v8:9407, chromium:1403574, chromium:1404607 Change-Id: I9d7ef4c94c1dc0aa3b4f49871ec35ef0877efc24 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4135876 Reviewed-by: Nico Hartmann Commit-Queue: Qifan Pan Cr-Commit-Position: refs/heads/main@{#85094} --- src/compiler/representation-change.cc | 4 +--- .../compiler/test-representation-change.cc | 7 ------- test/mjsunit/regress/regress-1404607.js | 18 ++++++++++++++++++ 3 files changed, 19 insertions(+), 10 deletions(-) create mode 100644 test/mjsunit/regress/regress-1404607.js diff --git a/src/compiler/representation-change.cc b/src/compiler/representation-change.cc index 5182369fcf..32ff14f3bb 100644 --- a/src/compiler/representation-change.cc +++ b/src/compiler/representation-change.cc @@ -1249,9 +1249,7 @@ Node* RepresentationChanger::GetWord64RepresentationFor( ((use_info.truncation().IsUsedAsWord64() && (use_info.type_check() == TypeCheckKind::kBigInt || output_type.Is(Type::BigInt()))) || - (use_info.type_check() == TypeCheckKind::kBigInt64 || - output_type.Is(Type::SignedBigInt64()) || - output_type.Is(Type::UnsignedBigInt64())))) { + use_info.type_check() == TypeCheckKind::kBigInt64)) { node = GetTaggedPointerRepresentationFor(node, output_rep, output_type, use_node, use_info); op = simplified()->TruncateBigIntToWord64(); diff --git a/test/cctest/compiler/test-representation-change.cc b/test/cctest/compiler/test-representation-change.cc index a9fee37e14..b1e1d51d2a 100644 --- a/test/cctest/compiler/test-representation-change.cc +++ b/test/cctest/compiler/test-representation-change.cc @@ -530,13 +530,6 @@ TEST(Word64) { IrOpcode::kChangeInt64ToFloat64, IrOpcode::kChangeFloat64ToTaggedPointer, MachineRepresentation::kWord64, TypeCache::Get()->kSafeInteger, MachineRepresentation::kTaggedPointer); - - CheckChange(IrOpcode::kTruncateBigIntToWord64, - MachineRepresentation::kTaggedPointer, Type::SignedBigInt64(), - MachineRepresentation::kWord64); - CheckChange(IrOpcode::kTruncateBigIntToWord64, 
- MachineRepresentation::kTaggedPointer, Type::UnsignedBigInt64(), - MachineRepresentation::kWord64); } TEST(SingleChanges) { diff --git a/test/mjsunit/regress/regress-1404607.js b/test/mjsunit/regress/regress-1404607.js new file mode 100644 index 0000000000..8ced478320 --- /dev/null +++ b/test/mjsunit/regress/regress-1404607.js @@ -0,0 +1,18 @@ +// Copyright 2023 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --allow-natives-syntax + +function opt() { + const buffer = new ArrayBuffer(64); + const view = new DataView(buffer); + let i = 1n; + i += 1n; + view.setUint8(i); +} + +%PrepareFunctionForOptimization(opt); +assertThrows(opt, TypeError); +%OptimizeFunctionOnNextCall(opt); +assertThrows(opt, TypeError); From d201f32e509189d8b4d72be0c301839825fb76ff Mon Sep 17 00:00:00 2001 From: Junliang Yan Date: Wed, 4 Jan 2023 09:58:35 -0500 Subject: [PATCH 144/654] ppc: [ptr-cage] Add ppc support Change-Id: I09da99e525c2c0ad992c70f5f6a715e36e6ede30 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4135888 Commit-Queue: Junliang Yan Reviewed-by: Milad Farazmand Cr-Commit-Position: refs/heads/main@{#85095} --- src/builtins/ppc/builtins-ppc.cc | 6 ++++++ src/codegen/ppc/macro-assembler-ppc.cc | 8 ++++---- src/codegen/ppc/macro-assembler-ppc.h | 5 +++++ src/compiler/backend/ppc/code-generator-ppc.cc | 4 ++-- 4 files changed, 17 insertions(+), 6 deletions(-) diff --git a/src/builtins/ppc/builtins-ppc.cc b/src/builtins/ppc/builtins-ppc.cc index 93e8b742f0..ca100f3eb7 100644 --- a/src/builtins/ppc/builtins-ppc.cc +++ b/src/builtins/ppc/builtins-ppc.cc @@ -836,6 +836,12 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, // Initialize the root register. // C calling convention. The first argument is passed in r3. 
__ mr(kRootRegister, r3); + +#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE + // Initialize the pointer cage base register. + __ LoadRootRelative(kPtrComprCageBaseRegister, + IsolateData::cage_base_offset()); +#endif } // Push a frame with special values setup to mark it as an entry frame. diff --git a/src/codegen/ppc/macro-assembler-ppc.cc b/src/codegen/ppc/macro-assembler-ppc.cc index f1a1d535bc..9fca29f6bb 100644 --- a/src/codegen/ppc/macro-assembler-ppc.cc +++ b/src/codegen/ppc/macro-assembler-ppc.cc @@ -685,7 +685,7 @@ void TurboAssembler::DecompressTaggedPointer(Register destination, Register source) { RecordComment("[ DecompressTaggedPointer"); ZeroExtWord32(destination, source); - add(destination, destination, kRootRegister); + add(destination, destination, kPtrComprCageBaseRegister); RecordComment("]"); } @@ -693,7 +693,7 @@ void TurboAssembler::DecompressTaggedPointer(Register destination, MemOperand field_operand) { RecordComment("[ DecompressTaggedPointer"); LoadU32(destination, field_operand, r0); - add(destination, destination, kRootRegister); + add(destination, destination, kPtrComprCageBaseRegister); RecordComment("]"); } @@ -701,7 +701,7 @@ void TurboAssembler::DecompressAnyTagged(Register destination, MemOperand field_operand) { RecordComment("[ DecompressAnyTagged"); LoadU32(destination, field_operand, r0); - add(destination, destination, kRootRegister); + add(destination, destination, kPtrComprCageBaseRegister); RecordComment("]"); } @@ -709,7 +709,7 @@ void TurboAssembler::DecompressAnyTagged(Register destination, Register source) { RecordComment("[ DecompressAnyTagged"); ZeroExtWord32(destination, source); - add(destination, destination, kRootRegister); + add(destination, destination, kPtrComprCageBaseRegister); RecordComment("]"); } diff --git a/src/codegen/ppc/macro-assembler-ppc.h b/src/codegen/ppc/macro-assembler-ppc.h index 79adea4c16..36f4738df7 100644 --- a/src/codegen/ppc/macro-assembler-ppc.h +++ b/src/codegen/ppc/macro-assembler-ppc.h @@ 
-14,6 +14,7 @@ #include "src/codegen/bailout-reason.h" #include "src/codegen/ppc/assembler-ppc.h" #include "src/common/globals.h" +#include "src/execution/isolate-data.h" #include "src/objects/contexts.h" namespace v8 { @@ -140,6 +141,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void InitializeRootRegister() { ExternalReference isolate_root = ExternalReference::isolate_root(isolate()); mov(kRootRegister, Operand(isolate_root)); +#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE + LoadRootRelative(kPtrComprCageBaseRegister, + IsolateData::cage_base_offset()); +#endif } void LoadDoubleLiteral(DoubleRegister result, base::Double value, diff --git a/src/compiler/backend/ppc/code-generator-ppc.cc b/src/compiler/backend/ppc/code-generator-ppc.cc index 97ad39c4f7..ca097cb0c2 100644 --- a/src/compiler/backend/ppc/code-generator-ppc.cc +++ b/src/compiler/backend/ppc/code-generator-ppc.cc @@ -2917,13 +2917,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kPPC_LoadDecompressTaggedPointer: { CHECK(instr->HasOutput()); ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false); - __ add(i.OutputRegister(), i.OutputRegister(), kRootRegister); + __ add(i.OutputRegister(), i.OutputRegister(), kPtrComprCageBaseRegister); break; } case kPPC_LoadDecompressAnyTagged: { CHECK(instr->HasOutput()); ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false); - __ add(i.OutputRegister(), i.OutputRegister(), kRootRegister); + __ add(i.OutputRegister(), i.OutputRegister(), kPtrComprCageBaseRegister); break; } default: From 203d9c8cb66751f5944f493bfaf0a8120a5899f7 Mon Sep 17 00:00:00 2001 From: Victor Gomes Date: Wed, 4 Jan 2023 16:06:23 +0100 Subject: [PATCH 145/654] [maglev][arm64] Fix InterruptBudget IRs Use a temporary for feedback cell, since if the `amount` is big enough, the macro instructions Add/Sub might need a temporary register as well. 
Bug: v8:7700 Change-Id: I2930f525ab3bf7d92fc1a47d9c483577c6186400 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4135889 Reviewed-by: Darius Mercadier Commit-Queue: Darius Mercadier Auto-Submit: Victor Gomes Cr-Commit-Position: refs/heads/main@{#85096} --- src/maglev/arm64/maglev-ir-arm64.cc | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index 20ebd60523..bca29453aa 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -1679,11 +1679,13 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm, FieldMemOperand(generator, JSGeneratorObject::kInputOrDebugPosOffset)); } -void IncreaseInterruptBudget::SetValueLocationConstraints() {} +void IncreaseInterruptBudget::SetValueLocationConstraints() { + set_temporaries_needed(1); +} void IncreaseInterruptBudget::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { UseScratchRegisterScope temps(masm); - Register feedback_cell = temps.AcquireX(); + Register feedback_cell = general_temporaries().PopFirst(); Register budget = temps.AcquireW(); __ Ldr(feedback_cell, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); @@ -1698,12 +1700,14 @@ void IncreaseInterruptBudget::GenerateCode(MaglevAssembler* masm, } int ReduceInterruptBudget::MaxCallStackArgs() const { return 1; } -void ReduceInterruptBudget::SetValueLocationConstraints() {} +void ReduceInterruptBudget::SetValueLocationConstraints() { + set_temporaries_needed(1); +} void ReduceInterruptBudget::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { { UseScratchRegisterScope temps(masm); - Register feedback_cell = temps.AcquireX(); + Register feedback_cell = general_temporaries().PopFirst(); Register budget = temps.AcquireW(); __ Ldr(feedback_cell, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); From 6adfc3856f7601616f622ebe74a17f1ca571344f Mon Sep 17 00:00:00 2001 
From: Leszek Swirski Date: Wed, 4 Jan 2023 14:11:34 +0100 Subject: [PATCH 146/654] [git blame] Add a few old refactors to .git-blame-ignore-revs Namely: 1. objects.h splitting into objects/*.h (crbug.com/v8/5402) 2. src/ splitting into subfolders for OWNERS (crbug.com/v8/9247) 3. splitting include/v8.h (crbug.com/v8/11965) This is best used with: # Use the .git-blame-ignore-revs file for git blames git config --global blame.ignorerevsfile .git-blame-ignore-revs # Track code movement with git blame using -C git blame -C Change-Id: Ia5a641be077a9befe008857beee3b6808bbd6107 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4135882 Reviewed-by: Alexander Schulze Commit-Queue: Alexander Schulze Auto-Submit: Leszek Swirski Cr-Commit-Position: refs/heads/main@{#85097} --- .git-blame-ignore-revs | 95 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 4c53e208e3..29372f34c1 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -18,11 +18,106 @@ # - Because you must use a hash, you need to append to this list in a follow-up # CL to the actual reformatting CL that you are trying to ignore. 
+# objects.h splitting +0604031eb1d01c52b6c1c9ae3012d80b23d74a68 +09e405453359000d66cc0faaa102854e626bebeb +766ef168fbcac6bd0728cc2c9bb3ae7cbd74278a +b5a2839b927be04bdb50a236071c11764e4d6400 +c911f91b5b6219e038c0117b05a8375bdf3db0b0 +1bb48bf91ba8c887884a0fbd674c91f64964d8a5 +19da9f24df7b59fec72b9dd8a979ad0ce5639c87 +b090d7e7468236ffce0afdb55bb496bf0073f2ee +f40638d148b7a435522d5b714993908061e3b10d +e8a1c25f6afae9b77921abb70fad49da252eb6f0 +6fa8283d0e031c9585d190f751907ed45bf85de0 +9aa861c4bcfed612039259f93c2cd2b01337e99e +8175648018bd9f70af866f9fa433f1d79644d86b +c7b1ceb801ec7f639a093468d8e6424212cc197c +e39d2cbe1b1baa6513ddce2d73c981e335cc34fb +eda00a5c499b7a83479115eb275a816b8a2ed104 +68deca9b418976ca8b3375e81058a9e0a815357f +0525e17847f39f80e3fd163021a58f68d8fcaf06 +81a3c699d6eef936452ac3d10c7c59a2c1e38c0c +01452bedfca2b5447a7f62bda87edbbb76259a6e +1baf1050113a5418696839c273e05ea5ad1b5c4d +4b39fe3d608916b1cfea015de287511a1623fc7f +c6effdbba9b301244475553538f6eb1b3d9670b9 +71e4c573199466ea4541e3d6b307c9b33d7bb785 +efc92f0d4aa77bb90f5b56606b6f0d0819fba4af +a9db2c74b5bae2345ac52be404748954a3b5050d +0a01b6202226bbe99c0b83acf6c5a80344f5fb6a +a6c44361c8f2dc07b935e3f2bb3e0d3ad4f4a383 +10d8aab1de430695a69e9d75af6ea42c2cdc9d6d +dd3c4fca2f0a2761b8b95cd47fcd62836d714890 +e9c932233980866074025e65051003d1f298516c +2b1f79881c3f0b69bfb9274bda57ea50f7304982 +7f031160d71a3d836667dc98288eaff4c94e6f56 +490fabb4578f8a3c4096fdccff688c17ed5ed00d +d953b2ab726acca0b3abe90ce090a16d7ccc2ae3 +bb514c426b9438cfb1149d219ac4ec2d8d1c8458 +dfb453d713d8a05e76f720a6aae2871eec210276 +b490fd66b873c89fca37b21eab58502b6367a864 +9a71683d9c8ff9470eda6be5b2b11babac7b9863 +37945f731c4d800ef788e3c32f8663773a93450e +b90c98fc29a8d896354de4a22c055f6d98376171 +35f3e9d0e654e84646a0b98f29e4a2786cdca4b1 +260eb5bb9b62ea3d5fa6ad0b0e8c2de75d48bad4 +cc2c11441ce352360acce8638a19f58edf361f7d +7be0159e4b1e0b064e215ae4ced34d649cb2552e +95a7cfe0eaabbcff0f730ed60e1805779f6cfe41 +8f54d18ba4ad10770e9537a2803459feccfe79a3 
+f44759d9ff52a3e5563e5f2bb23ee2c08222fcfd +09050c8a967f5f2956305e5d016b304d7bf5e669 +c769745d5856a7eb3a0dbe6af5376c7638944364 +a1547aa914aeedd7862f74124c18d2bbaf432c36 +5f950698c0dc7c36b855961feb929022f74102fb +4aedeb1bd50c12ebcd6cf954c4cbef1205fff5ac +7366d8954cb1bd277d3283241da2fae62b886c48 +bc35251f5e55a65c3a4acf7cba52cee505c86a46 +4fb60b215801db70c694a799e735b64bfead59bb +03762b8488de0e393077e3f40fe7b63e675b3af3 +a8a45d875f0a98b192cf0063ceda12aaf75ddfaf +a48e5ab8804e9e97b5ea577d6f2667bacee92eb2 + # Update of quotations in DEPS file. e50b49a0e38b34e2b28e026f4d1c7e0da0c7bb1a # Rewrite code base to use "." instead of "->" to access Object members. 878ccb33bd3cf0e6dc018ff8d15843f585ac07be +# Splitting src/ into subfolders +632239011db501e76475d82ff6492f37fa8c1edc +f455f86d899716df3b9550950ce172f5b867619a +24a51e1eee4e286165dd0bba6afb4c35e8177a25 +f9a88acbc928f0fc5e9a3acbcd3b4ece52355f3d +dec3298d9cfbe95759774a0e00302a08836b5f3d +a0c3797461810e3159662851e64946e17654236e +b72941e8b0d2843adf768442024d8950da798db1 +4c986c625f19e35c95f3492c662822f4695218b4 +0fa243af7096ee5b748b194476be2e4efecaec59 +786ce26341b7ab11b4d42f1c77202530d5138ad2 +a6eeea35cb7ff0c29b6cfdd1c786f382110241ce +be014256adea1552d4a044ef80616cdab6a7d549 +93d3b7173fec7d010539057cdbd78d497f09fa9b +5bfe84a0dab60289b3470c080908ce83ac2212d4 +a7695520556665ba73ab02c497ab73b162a5fb13 +61523c45a335fe3be76498e0b16bf8e7aec0d058 +bf372a73d8a5f4029fc9f4f69b675ef0cad80ada +8ad6b335376c6275ffb3361c662a1a45c853f4fc +06bf8261cf2c94fc071652652600b5790f719c05 +81a0102fe8586071cc68e9595b26c5c1207ee5b3 +5f28539599f6a6a265e18b8c897cc96ccbeec9c4 +3253767622a784866dc34aeb7b5d0f02ebdff61e +9ac8b20086f95f1158a1901eefe12e25fd0333e4 +3cb560adfe26edb586a0e6e655e5a7c4755cad1a +7bbd0bfe5161d57bcf268716ce4d1ce14d6786e6 +c39cabbcbea26891558b81fd2236c38a7aeada08 +a3187716d31a0ab9d7051adde6be9bd2b2c6fec1 + # Move test/mjsunit/regress-*.js => test/mjsunit/regress/ cb67be1a3842fcf6a0da18aee444e3b7ea789e04 + +# [include] Split out v8.h 
+d1b27019d3bf86360ea838c317f8505fac6d3a7e +44fe02ced6e4c6b49d627807e3b3fd0edbbeb36e +ec06bb6ce5641cf65e400ec55b7421f87d04b999 From 05a76791c462f6f06cd910011e54baec91668d04 Mon Sep 17 00:00:00 2001 From: Clemens Backes Date: Wed, 4 Jan 2023 16:07:16 +0100 Subject: [PATCH 147/654] [liftoff][fuzzer] Do not throw before frame setup Delay the "max steps" check until the frame is fully set up. This means that the work is already done at the point where we check the maximum number of steps, but the additional work is limited by the maximum number of locals and parameters. R=thibaudm@chromium.org Bug: chromium:1404619 Change-Id: I4919c837feea92af84f99182a571edf96e4728ac Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4135890 Auto-Submit: Clemens Backes Commit-Queue: Thibaud Michaud Reviewed-by: Thibaud Michaud Cr-Commit-Position: refs/heads/main@{#85098} --- src/wasm/baseline/liftoff-compiler.cc | 14 ++++++++------ test/fuzzer/wasm/regress-1404619.wasm | Bin 0 -> 38 bytes 2 files changed, 8 insertions(+), 6 deletions(-) create mode 100644 test/fuzzer/wasm/regress-1404619.wasm diff --git a/src/wasm/baseline/liftoff-compiler.cc b/src/wasm/baseline/liftoff-compiler.cc index a9b9a64864..970fa7fe2f 100644 --- a/src/wasm/baseline/liftoff-compiler.cc +++ b/src/wasm/baseline/liftoff-compiler.cc @@ -901,12 +901,6 @@ class LiftoffCompiler { if (for_debugging_) __ ResetOSRTarget(); - if (V8_UNLIKELY(max_steps_)) { - // Subtract 16 steps for the function call itself (including the function - // prologue), plus 1 for each local (including parameters). - CheckMaxSteps(decoder, 16 + __ num_locals()); - } - if (num_params) { CODE_COMMENT("process parameters"); ParameterProcessor processor(this, num_params); @@ -960,6 +954,14 @@ class LiftoffCompiler { // is never a position of any instruction in the function. 
StackCheck(decoder, 0); + if (V8_UNLIKELY(max_steps_)) { + // Subtract 16 steps for the function call itself (including the function + // prologue), plus 1 for each local (including parameters). + // Do this only *after* setting up the frame completely, even though we + // already executed the work then. + CheckMaxSteps(decoder, 16 + __ num_locals()); + } + if (v8_flags.trace_wasm) TraceFunctionEntry(decoder); } diff --git a/test/fuzzer/wasm/regress-1404619.wasm b/test/fuzzer/wasm/regress-1404619.wasm new file mode 100644 index 0000000000000000000000000000000000000000..0904cc486695b0e60ff4c5757b6d15c365cb108d GIT binary patch literal 38 tcmZQbEY4+QU|?WmV@zNyU@Tx}Vq{?FU}VWn%* Date: Wed, 4 Jan 2023 16:32:52 +0100 Subject: [PATCH 148/654] [maglev] Support in-heap TypedArrays Drive-by: fix a bug with TypedArray loads: because we used the output register as a temporary, if it was actually aliasing with one of the input registers, the generated code was incorrect. Bug: v8:7700 Change-Id: Id297f728ca2de13ebc5993cea675900fbfdd7886 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4135884 Reviewed-by: Victor Gomes Commit-Queue: Darius Mercadier Cr-Commit-Position: refs/heads/main@{#85099} --- src/maglev/arm64/maglev-assembler-arm64-inl.h | 13 +++++++++++ src/maglev/arm64/maglev-ir-arm64.cc | 22 +++++++++--------- src/maglev/maglev-assembler.h | 2 ++ src/maglev/maglev-graph-builder.cc | 5 ---- src/maglev/x64/maglev-assembler-x64-inl.h | 12 ++++++++++ src/maglev/x64/maglev-ir-x64.cc | 23 +++++++++++-------- 6 files changed, 52 insertions(+), 25 deletions(-) diff --git a/src/maglev/arm64/maglev-assembler-arm64-inl.h b/src/maglev/arm64/maglev-assembler-arm64-inl.h index b69266adb4..f4949e598b 100644 --- a/src/maglev/arm64/maglev-assembler-arm64-inl.h +++ b/src/maglev/arm64/maglev-assembler-arm64-inl.h @@ -317,6 +317,19 @@ inline MemOperand MaglevAssembler::ToMemOperand(const ValueLocation& location) { return ToMemOperand(location.operand()); } +inline void 
MaglevAssembler::BuildTypedArrayDataPointer(Register data_pointer, + Register object) { + DCHECK_NE(data_pointer, object); + LoadExternalPointerField( + data_pointer, + FieldMemOperand(object, JSTypedArray::kExternalPointerOffset)); + if (JSTypedArray::kMaxSizeInHeap == 0) return; + UseScratchRegisterScope scope(this); + Register base = scope.AcquireW(); + ldr(base, FieldMemOperand(object, JSTypedArray::kBasePointerOffset)); + add(data_pointer, data_pointer, base); +} + inline void MaglevAssembler::LoadBoundedSizeFromObject(Register result, Register object, int offset) { diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index bca29453aa..0614d455af 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -1757,7 +1757,7 @@ void LoadSignedIntTypedArrayElement::GenerateCode( Register object = ToRegister(object_input()); Register index = ToRegister(index_input()); Register result_reg = ToRegister(result()); - Register data_pointer = result_reg; + __ AssertNotSmi(object); if (v8_flags.debug_code) { UseScratchRegisterScope temps(masm); @@ -1765,10 +1765,11 @@ void LoadSignedIntTypedArrayElement::GenerateCode( __ CompareObjectType(object, scratch, scratch, JS_TYPED_ARRAY_TYPE); __ Assert(eq, AbortReason::kUnexpectedValue); } + + UseScratchRegisterScope temps(masm); + Register data_pointer = temps.AcquireX(); int element_size = ElementsKindSize(elements_kind_); - __ LoadExternalPointerField( - data_pointer, - FieldMemOperand(object, JSTypedArray::kExternalPointerOffset)); + __ BuildTypedArrayDataPointer(data_pointer, object); __ Add(data_pointer, data_pointer, Operand(index, LSL, element_size / 2)); __ LoadSignedField(result_reg.W(), MemOperand(data_pointer), element_size); } @@ -1783,7 +1784,7 @@ void LoadUnsignedIntTypedArrayElement::GenerateCode( Register object = ToRegister(object_input()); Register index = ToRegister(index_input()); Register result_reg = ToRegister(result()); - Register 
data_pointer = result_reg; + __ AssertNotSmi(object); if (v8_flags.debug_code) { UseScratchRegisterScope temps(masm); @@ -1791,10 +1792,11 @@ void LoadUnsignedIntTypedArrayElement::GenerateCode( __ CompareObjectType(object, scratch, scratch, JS_TYPED_ARRAY_TYPE); __ Assert(eq, AbortReason::kUnexpectedValue); } + + UseScratchRegisterScope temps(masm); + Register data_pointer = temps.AcquireX(); int element_size = ElementsKindSize(elements_kind_); - __ LoadExternalPointerField( - data_pointer, - FieldMemOperand(object, JSTypedArray::kExternalPointerOffset)); + __ BuildTypedArrayDataPointer(data_pointer, object); __ Add(data_pointer, data_pointer, Operand(index, LSL, element_size / 2)); __ LoadUnsignedField(result_reg.W(), MemOperand(data_pointer), element_size); } @@ -1820,9 +1822,7 @@ void LoadDoubleTypedArrayElement::GenerateCode(MaglevAssembler* masm, UseScratchRegisterScope temps(masm); Register data_pointer = temps.AcquireX(); - __ LoadExternalPointerField( - data_pointer, - FieldMemOperand(object, JSTypedArray::kExternalPointerOffset)); + __ BuildTypedArrayDataPointer(data_pointer, object); switch (elements_kind_) { case FLOAT32_ELEMENTS: __ Add(data_pointer, data_pointer, Operand(index, LSL, 2)); diff --git a/src/maglev/maglev-assembler.h b/src/maglev/maglev-assembler.h index 2bdc69d411..84f1ad26e7 100644 --- a/src/maglev/maglev-assembler.h +++ b/src/maglev/maglev-assembler.h @@ -99,6 +99,8 @@ class MaglevAssembler : public MacroAssembler { inline void StoreField(MemOperand operand, Register value, int element_size); inline void ReverseByteOrder(Register value, int element_size); + void BuildTypedArrayDataPointer(Register data_pointer, Register object); + // Warning: Input registers {string} and {index} will be scratched. // {result} is allowed to alias with one the other 3 input registers. // {result} is an int32. 
diff --git a/src/maglev/maglev-graph-builder.cc b/src/maglev/maglev-graph-builder.cc index 9ad23aa434..86a462913f 100644 --- a/src/maglev/maglev-graph-builder.cc +++ b/src/maglev/maglev-graph-builder.cc @@ -2213,11 +2213,6 @@ bool MaglevGraphBuilder::TryBuildElementAccess( // TODO(victorgomes): Support more elements kind. ElementsKind elements_kind = access_info.elements_kind(); if (IsTypedArrayElementsKind(elements_kind)) { - if (JSTypedArray::kMaxSizeInHeap != 0) { - // TODO(victorgomes): Support typed array in heap. - // Note that the common config is equal to 0 (for Chrome and Node). - return false; - } if (elements_kind == BIGUINT64_ELEMENTS || elements_kind == BIGINT64_ELEMENTS) { return false; diff --git a/src/maglev/x64/maglev-assembler-x64-inl.h b/src/maglev/x64/maglev-assembler-x64-inl.h index ee0bf03069..3cf540ed6e 100644 --- a/src/maglev/x64/maglev-assembler-x64-inl.h +++ b/src/maglev/x64/maglev-assembler-x64-inl.h @@ -189,6 +189,18 @@ inline MemOperand MaglevAssembler::ToMemOperand(const ValueLocation& location) { return ToMemOperand(location.operand()); } +inline void MaglevAssembler::BuildTypedArrayDataPointer(Register data_pointer, + Register object) { + DCHECK_NE(data_pointer, object); + LoadExternalPointerField( + data_pointer, FieldOperand(object, JSTypedArray::kExternalPointerOffset)); + if (JSTypedArray::kMaxSizeInHeap == 0) return; + + Register base = kScratchRegister; + movl(base, FieldOperand(object, JSTypedArray::kBasePointerOffset)); + addq(data_pointer, base); +} + inline void MaglevAssembler::LoadBoundedSizeFromObject(Register result, Register object, int offset) { diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index a61fc60c82..28818443f9 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -1071,21 +1071,23 @@ void LoadSignedIntTypedArrayElement::SetValueLocationConstraints() { UseRegister(object_input()); UseRegister(index_input()); DefineAsRegister(this); + 
set_temporaries_needed(1); } void LoadSignedIntTypedArrayElement::GenerateCode( MaglevAssembler* masm, const ProcessingState& state) { Register object = ToRegister(object_input()); Register index = ToRegister(index_input()); Register result_reg = ToRegister(result()); - Register data_pointer = result_reg; + __ AssertNotSmi(object); if (v8_flags.debug_code) { __ CmpObjectType(object, JS_TYPED_ARRAY_TYPE, kScratchRegister); __ Assert(equal, AbortReason::kUnexpectedValue); } + + Register data_pointer = general_temporaries().PopFirst(); + __ BuildTypedArrayDataPointer(data_pointer, object); int element_size = ElementsKindSize(elements_kind_); - __ LoadExternalPointerField( - data_pointer, FieldOperand(object, JSTypedArray::kExternalPointerOffset)); __ LoadSignedField( result_reg, Operand(data_pointer, index, ScaleFactorFromInt(element_size), 0), @@ -1096,21 +1098,23 @@ void LoadUnsignedIntTypedArrayElement::SetValueLocationConstraints() { UseRegister(object_input()); UseRegister(index_input()); DefineAsRegister(this); + set_temporaries_needed(1); } void LoadUnsignedIntTypedArrayElement::GenerateCode( MaglevAssembler* masm, const ProcessingState& state) { Register object = ToRegister(object_input()); Register index = ToRegister(index_input()); Register result_reg = ToRegister(result()); - Register data_pointer = result_reg; + __ AssertNotSmi(object); if (v8_flags.debug_code) { __ CmpObjectType(object, JS_TYPED_ARRAY_TYPE, kScratchRegister); __ Assert(equal, AbortReason::kUnexpectedValue); } + + Register data_pointer = general_temporaries().PopFirst(); int element_size = ElementsKindSize(elements_kind_); - __ LoadExternalPointerField( - data_pointer, FieldOperand(object, JSTypedArray::kExternalPointerOffset)); + __ BuildTypedArrayDataPointer(data_pointer, object); __ LoadUnsignedField( result_reg, Operand(data_pointer, index, ScaleFactorFromInt(element_size), 0), @@ -1121,20 +1125,21 @@ void LoadDoubleTypedArrayElement::SetValueLocationConstraints() { 
UseRegister(object_input()); UseRegister(index_input()); DefineAsRegister(this); + set_temporaries_needed(1); } void LoadDoubleTypedArrayElement::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { Register object = ToRegister(object_input()); Register index = ToRegister(index_input()); DoubleRegister result_reg = ToDoubleRegister(result()); - Register data_pointer = kScratchRegister; __ AssertNotSmi(object); if (v8_flags.debug_code) { __ CmpObjectType(object, JS_TYPED_ARRAY_TYPE, kScratchRegister); __ Assert(equal, AbortReason::kUnexpectedValue); } - __ LoadExternalPointerField( - data_pointer, FieldOperand(object, JSTypedArray::kExternalPointerOffset)); + + Register data_pointer = general_temporaries().PopFirst(); + __ BuildTypedArrayDataPointer(data_pointer, object); switch (elements_kind_) { case FLOAT32_ELEMENTS: __ Movss(result_reg, Operand(data_pointer, index, times_4, 0)); From 6eb0a668c2c3afc5c490700737811193278707cb Mon Sep 17 00:00:00 2001 From: Michael Lippautz Date: Wed, 4 Jan 2023 17:19:36 +0100 Subject: [PATCH 149/654] [heap] Move wrappable extraction logic out of LocalEmbedderHeapTracer Bug: v8:13207 Change-Id: I5d96454c7335e698ff79572706cf0c16640fdd53 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4136711 Reviewed-by: Anton Bikineev Commit-Queue: Michael Lippautz Cr-Commit-Position: refs/heads/main@{#85100} --- BUILD.bazel | 3 +- BUILD.gn | 3 +- src/heap/cppgc-js/cpp-heap.cc | 16 +++++++- src/heap/cppgc-js/cpp-heap.h | 1 + src/heap/cppgc-js/cpp-marking-state-inl.h | 11 ++--- src/heap/cppgc-js/cpp-snapshot.cc | 14 ++++--- src/heap/cppgc-js/wrappable-info-inl.h | 50 +++++++++++++++++++++++ src/heap/cppgc-js/wrappable-info.h | 34 +++++++++++++++ src/heap/embedder-tracing-inl.h | 46 --------------------- src/heap/embedder-tracing.cc | 34 --------------- src/heap/embedder-tracing.h | 34 --------------- src/heap/heap-write-barrier.cc | 7 ++-- 12 files changed, 121 insertions(+), 132 deletions(-) create mode 100644 
src/heap/cppgc-js/wrappable-info-inl.h create mode 100644 src/heap/cppgc-js/wrappable-info.h delete mode 100644 src/heap/embedder-tracing-inl.h diff --git a/BUILD.bazel b/BUILD.bazel index 8a8c0cad50..d6792860e6 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -1444,9 +1444,10 @@ filegroup( "src/heap/cppgc-js/unified-heap-marking-verifier.h", "src/heap/cppgc-js/unified-heap-marking-visitor.cc", "src/heap/cppgc-js/unified-heap-marking-visitor.h", + "src/heap/cppgc-js/wrappable-info.h", + "src/heap/cppgc-js/wrappable-info-inl.h", "src/heap/embedder-tracing.cc", "src/heap/embedder-tracing.h", - "src/heap/embedder-tracing-inl.h", "src/heap/evacuation-verifier.cc", "src/heap/evacuation-verifier.h", "src/heap/evacuation-verifier-inl.h", diff --git a/BUILD.gn b/BUILD.gn index 9beefd6966..e7b5853d0c 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -3128,7 +3128,8 @@ v8_header_set("v8_internal_headers") { "src/heap/cppgc-js/unified-heap-marking-state.h", "src/heap/cppgc-js/unified-heap-marking-verifier.h", "src/heap/cppgc-js/unified-heap-marking-visitor.h", - "src/heap/embedder-tracing-inl.h", + "src/heap/cppgc-js/wrappable-info-inl.h", + "src/heap/cppgc-js/wrappable-info.h", "src/heap/embedder-tracing.h", "src/heap/evacuation-allocator-inl.h", "src/heap/evacuation-allocator.h", diff --git a/src/heap/cppgc-js/cpp-heap.cc b/src/heap/cppgc-js/cpp-heap.cc index 7b7473f17f..c4b982e0b5 100644 --- a/src/heap/cppgc-js/cpp-heap.cc +++ b/src/heap/cppgc-js/cpp-heap.cc @@ -43,7 +43,6 @@ #include "src/heap/cppgc/sweeper.h" #include "src/heap/cppgc/unmarker.h" #include "src/heap/cppgc/visitor.h" -#include "src/heap/embedder-tracing-inl.h" #include "src/heap/embedder-tracing.h" #include "src/heap/gc-tracer.h" #include "src/heap/heap.h" @@ -763,6 +762,21 @@ bool CppHeap::FinishConcurrentMarkingIfNeeded() { return marker_->JoinConcurrentMarkingIfNeeded(); } +void CppHeap::WriteBarrier(JSObject js_object) { + DCHECK(js_object.MayHaveEmbedderFields()); + 
DCHECK_NOT_NULL(isolate()->heap()->mark_compact_collector()); + auto descriptor = wrapper_descriptor(); + const EmbedderDataSlot type_slot(js_object, descriptor.wrappable_type_index); + const EmbedderDataSlot instance_slot(js_object, + descriptor.wrappable_instance_index); + isolate() + ->heap() + ->mark_compact_collector() + ->local_marking_worklists() + ->cpp_marking_state() + ->MarkAndPush(type_slot, instance_slot); +} + namespace { void RecordEmbedderSpeed(GCTracer* tracer, base::TimeDelta marking_time, diff --git a/src/heap/cppgc-js/cpp-heap.h b/src/heap/cppgc-js/cpp-heap.h index 66298f38cb..b3bc5be03d 100644 --- a/src/heap/cppgc-js/cpp-heap.h +++ b/src/heap/cppgc-js/cpp-heap.h @@ -144,6 +144,7 @@ class V8_EXPORT_PRIVATE CppHeap final void TraceEpilogue(); void EnterFinalPause(cppgc::EmbedderStackState stack_state); bool FinishConcurrentMarkingIfNeeded(); + void WriteBarrier(JSObject); // StatsCollector::AllocationObserver interface. void AllocatedObjectSizeIncreased(size_t) final; diff --git a/src/heap/cppgc-js/cpp-marking-state-inl.h b/src/heap/cppgc-js/cpp-marking-state-inl.h index 23294b4dca..7652bea05d 100644 --- a/src/heap/cppgc-js/cpp-marking-state-inl.h +++ b/src/heap/cppgc-js/cpp-marking-state-inl.h @@ -6,7 +6,8 @@ #define V8_HEAP_CPPGC_JS_CPP_MARKING_STATE_INL_H_ #include "src/heap/cppgc-js/cpp-marking-state.h" -#include "src/heap/embedder-tracing-inl.h" +#include "src/heap/cppgc-js/wrappable-info-inl.h" +#include "src/heap/cppgc-js/wrappable-info.h" #include "src/objects/embedder-data-slot.h" #include "src/objects/js-objects.h" @@ -33,11 +34,11 @@ void CppMarkingState::MarkAndPush(const EmbedderDataSnapshot& snapshot) { void CppMarkingState::MarkAndPush(const EmbedderDataSlot type_slot, const EmbedderDataSlot instance_slot) { - LocalEmbedderHeapTracer::WrapperInfo info; - if (LocalEmbedderHeapTracer::ExtractWrappableInfo( - isolate_, wrapper_descriptor_, type_slot, instance_slot, &info)) { + const auto maybe_info = WrappableInfo::From( + isolate_, 
type_slot, instance_slot, wrapper_descriptor_); + if (maybe_info.has_value()) { marking_state_.MarkAndPush( - cppgc::internal::HeapObjectHeader::FromObject(info.second)); + cppgc::internal::HeapObjectHeader::FromObject(maybe_info->instance)); } } diff --git a/src/heap/cppgc-js/cpp-snapshot.cc b/src/heap/cppgc-js/cpp-snapshot.cc index 1424f97618..a1e4525889 100644 --- a/src/heap/cppgc-js/cpp-snapshot.cc +++ b/src/heap/cppgc-js/cpp-snapshot.cc @@ -14,6 +14,8 @@ #include "src/base/logging.h" #include "src/execution/isolate.h" #include "src/heap/cppgc-js/cpp-heap.h" +#include "src/heap/cppgc-js/wrappable-info-inl.h" +#include "src/heap/cppgc-js/wrappable-info.h" #include "src/heap/cppgc/heap-object-header.h" #include "src/heap/cppgc/heap-visitor.h" #include "src/heap/cppgc/visitor.h" @@ -352,7 +354,7 @@ class StateStorage final { size_t state_count_ = 0; }; -void* ExtractEmbedderDataBackref(Isolate* isolate, +void* ExtractEmbedderDataBackref(Isolate* isolate, CppHeap& cpp_heap, v8::Local v8_value) { // See LocalEmbedderHeapTracer::VerboseWrapperTypeInfo for details on how // wrapper objects are set up. @@ -364,10 +366,10 @@ void* ExtractEmbedderDataBackref(Isolate* isolate, return nullptr; JSObject js_object = JSObject::cast(*v8_object); - return LocalEmbedderHeapTracer::VerboseWrapperInfo( - isolate->heap()->local_embedder_heap_tracer()->ExtractWrapperInfo( - isolate, js_object)) - .instance(); + + const auto maybe_info = + WrappableInfo::From(isolate, js_object, cpp_heap.wrapper_descriptor()); + return maybe_info.has_value() ? 
maybe_info->instance : nullptr; } // The following implements a snapshotting algorithm for C++ objects that also @@ -488,7 +490,7 @@ class CppGraphBuilderImpl final { void* back_reference_object = ExtractEmbedderDataBackref( reinterpret_cast(cpp_heap_.isolate()), - v8_value); + cpp_heap_, v8_value); if (back_reference_object) { auto& back_header = HeapObjectHeader::FromObject(back_reference_object); auto& back_state = states_.GetExistingState(back_header); diff --git a/src/heap/cppgc-js/wrappable-info-inl.h b/src/heap/cppgc-js/wrappable-info-inl.h new file mode 100644 index 0000000000..e9c65fd2d5 --- /dev/null +++ b/src/heap/cppgc-js/wrappable-info-inl.h @@ -0,0 +1,50 @@ +// Copyright 2023 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_HEAP_CPPGC_JS_WRAPPABLE_INFO_INL_H_ +#define V8_HEAP_CPPGC_JS_WRAPPABLE_INFO_INL_H_ + +#include "src/base/optional.h" +#include "src/heap/cppgc-js/wrappable-info.h" +#include "src/objects/embedder-data-slot.h" +#include "src/objects/js-objects-inl.h" + +namespace v8::internal { + +// static +base::Optional WrappableInfo::From( + Isolate* isolate, JSObject wrapper, + const WrapperDescriptor& wrapper_descriptor) { + DCHECK(wrapper.MayHaveEmbedderFields()); + return wrapper.GetEmbedderFieldCount() < 2 + ? 
base::Optional() + : From(isolate, + EmbedderDataSlot(wrapper, + wrapper_descriptor.wrappable_type_index), + EmbedderDataSlot( + wrapper, wrapper_descriptor.wrappable_instance_index), + wrapper_descriptor); +} + +// static +base::Optional WrappableInfo::From( + Isolate* isolate, const EmbedderDataSlot& type_slot, + const EmbedderDataSlot& instance_slot, + const WrapperDescriptor& wrapper_descriptor) { + void* type; + void* instance; + if (type_slot.ToAlignedPointer(isolate, &type) && type && + instance_slot.ToAlignedPointer(isolate, &instance) && instance && + (wrapper_descriptor.embedder_id_for_garbage_collected == + WrapperDescriptor::kUnknownEmbedderId || + (*static_cast(type) == + wrapper_descriptor.embedder_id_for_garbage_collected))) { + return base::Optional(base::in_place, type, instance); + } + return {}; +} + +} // namespace v8::internal + +#endif // V8_HEAP_CPPGC_JS_WRAPPABLE_INFO_INL_H_ diff --git a/src/heap/cppgc-js/wrappable-info.h b/src/heap/cppgc-js/wrappable-info.h new file mode 100644 index 0000000000..7a11daa10a --- /dev/null +++ b/src/heap/cppgc-js/wrappable-info.h @@ -0,0 +1,34 @@ +// Copyright 2023 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_HEAP_CPPGC_JS_WRAPPABLE_INFO_H_ +#define V8_HEAP_CPPGC_JS_WRAPPABLE_INFO_H_ + +#include "include/v8-cppgc.h" +#include "src/base/optional.h" +#include "src/objects/embedder-data-slot.h" +#include "src/objects/js-objects.h" + +namespace v8::internal { + +class Isolate; + +struct WrappableInfo final { + public: + static V8_INLINE base::Optional From(Isolate*, JSObject, + const WrapperDescriptor&); + static V8_INLINE base::Optional From( + Isolate*, const EmbedderDataSlot& type_slot, + const EmbedderDataSlot& instance_slot, const WrapperDescriptor&); + + constexpr WrappableInfo(void* type, void* instance) + : type(type), instance(instance) {} + + void* type = nullptr; + void* instance = nullptr; +}; + +} // namespace v8::internal + +#endif // V8_HEAP_CPPGC_JS_WRAPPABLE_INFO_H_ diff --git a/src/heap/embedder-tracing-inl.h b/src/heap/embedder-tracing-inl.h deleted file mode 100644 index 9a1c201f41..0000000000 --- a/src/heap/embedder-tracing-inl.h +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2021 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
-#ifndef V8_HEAP_EMBEDDER_TRACING_INL_H_ -#define V8_HEAP_EMBEDDER_TRACING_INL_H_ - -#include "src/heap/embedder-tracing.h" -#include "src/objects/embedder-data-slot.h" -#include "src/objects/js-objects-inl.h" - -namespace v8 { -namespace internal { - -// static -bool LocalEmbedderHeapTracer::ExtractWrappableInfo( - Isolate* isolate, JSObject js_object, - const WrapperDescriptor& wrapper_descriptor, WrapperInfo* info) { - DCHECK(js_object.MayHaveEmbedderFields()); - if (js_object.GetEmbedderFieldCount() < 2) return false; - - return ExtractWrappableInfo( - isolate, wrapper_descriptor, - EmbedderDataSlot(js_object, wrapper_descriptor.wrappable_type_index), - EmbedderDataSlot(js_object, wrapper_descriptor.wrappable_instance_index), - info); -} - -// static -bool LocalEmbedderHeapTracer::ExtractWrappableInfo( - Isolate* isolate, const WrapperDescriptor& wrapper_descriptor, - const EmbedderDataSlot& type_slot, const EmbedderDataSlot& instance_slot, - WrapperInfo* info) { - if (type_slot.ToAlignedPointer(isolate, &info->first) && info->first && - instance_slot.ToAlignedPointer(isolate, &info->second) && info->second) { - return (wrapper_descriptor.embedder_id_for_garbage_collected == - WrapperDescriptor::kUnknownEmbedderId) || - (*static_cast(info->first) == - wrapper_descriptor.embedder_id_for_garbage_collected); - } - return false; -} - -} // namespace internal -} // namespace v8 - -#endif // V8_HEAP_EMBEDDER_TRACING_INL_H_ diff --git a/src/heap/embedder-tracing.cc b/src/heap/embedder-tracing.cc index 349b94824d..9bd90da257 100644 --- a/src/heap/embedder-tracing.cc +++ b/src/heap/embedder-tracing.cc @@ -7,15 +7,10 @@ #include "include/cppgc/common.h" #include "include/v8-cppgc.h" #include "src/base/logging.h" -#include "src/handles/global-handles.h" -#include "src/heap/embedder-tracing-inl.h" -#include "src/heap/gc-tracer.h" #include "src/heap/marking-worklist-inl.h" namespace v8::internal { -START_ALLOW_USE_DEPRECATED() - void 
LocalEmbedderHeapTracer::SetCppHeap(CppHeap* cpp_heap) { cpp_heap_ = cpp_heap; } @@ -70,33 +65,4 @@ bool LocalEmbedderHeapTracer::IsRemoteTracingDone() { return !InUse() || cpp_heap()->IsTracingDone(); } -LocalEmbedderHeapTracer::WrapperInfo -LocalEmbedderHeapTracer::ExtractWrapperInfo(Isolate* isolate, - JSObject js_object) { - DCHECK(InUse()); - WrapperInfo info; - if (ExtractWrappableInfo(isolate, js_object, wrapper_descriptor(), &info)) { - return info; - } - return {nullptr, nullptr}; -} - -void LocalEmbedderHeapTracer::EmbedderWriteBarrier(Heap* heap, - JSObject js_object) { - DCHECK(InUse()); - DCHECK(js_object.MayHaveEmbedderFields()); - DCHECK_NOT_NULL(heap->mark_compact_collector()); - auto descriptor = wrapper_descriptor(); - const EmbedderDataSlot type_slot(js_object, descriptor.wrappable_type_index); - const EmbedderDataSlot instance_slot(js_object, - descriptor.wrappable_instance_index); - heap->mark_compact_collector() - ->local_marking_worklists() - ->cpp_marking_state() - ->MarkAndPush(type_slot, instance_slot); - return; -} - -END_ALLOW_USE_DEPRECATED() - } // namespace v8::internal diff --git a/src/heap/embedder-tracing.h b/src/heap/embedder-tracing.h index fac8988c7b..c0a8b900ce 100644 --- a/src/heap/embedder-tracing.h +++ b/src/heap/embedder-tracing.h @@ -25,32 +25,6 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final { kMinor, kMajor, }; - using WrapperInfo = std::pair; - - // WrapperInfo is passed over the API. Use VerboseWrapperInfo to access pair - // internals in a named way. See ProcessingScope::TracePossibleJSWrapper() - // below on how a V8 object is parsed to gather the information. - struct VerboseWrapperInfo { - constexpr explicit VerboseWrapperInfo(const WrapperInfo& raw_info) - : raw_info(raw_info) {} - - // Information describing the type pointed to via instance(). - void* type_info() const { return raw_info.first; } - // Direct pointer to an instance described by type_info(). 
- void* instance() const { return raw_info.second; } - // Returns whether the info is empty and thus does not keep a C++ object - // alive. - bool is_empty() const { return !type_info() || !instance(); } - - const WrapperInfo& raw_info; - }; - - static V8_INLINE bool ExtractWrappableInfo(Isolate*, JSObject, - const WrapperDescriptor&, - WrapperInfo*); - static V8_INLINE bool ExtractWrappableInfo( - Isolate*, const WrapperDescriptor&, const EmbedderDataSlot& type_slot, - const EmbedderDataSlot& instance_slot, WrapperInfo*); explicit LocalEmbedderHeapTracer(Isolate* isolate) : isolate_(isolate) {} @@ -87,14 +61,10 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final { embedder_worklist_empty_ = is_empty; } - WrapperInfo ExtractWrapperInfo(Isolate* isolate, JSObject js_object); - cppgc::EmbedderStackState embedder_stack_state() const { return embedder_stack_state_; } - void EmbedderWriteBarrier(Heap*, JSObject); - private: CppHeap* cpp_heap() { DCHECK_NOT_NULL(cpp_heap_); @@ -102,10 +72,6 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final { return cpp_heap_; } - WrapperDescriptor wrapper_descriptor() { - return cpp_heap()->wrapper_descriptor(); - } - Isolate* const isolate_; CppHeap* cpp_heap_ = nullptr; diff --git a/src/heap/heap-write-barrier.cc b/src/heap/heap-write-barrier.cc index 92ef73064d..2229bde545 100644 --- a/src/heap/heap-write-barrier.cc +++ b/src/heap/heap-write-barrier.cc @@ -58,10 +58,9 @@ void WriteBarrier::MarkingSlowFromGlobalHandle(HeapObject value) { // static void WriteBarrier::MarkingSlowFromInternalFields(Heap* heap, JSObject host) { - auto* local_embedder_heap_tracer = heap->local_embedder_heap_tracer(); - if (!local_embedder_heap_tracer->InUse()) return; - - local_embedder_heap_tracer->EmbedderWriteBarrier(heap, host); + if (auto* cpp_heap = heap->cpp_heap()) { + CppHeap::From(cpp_heap)->WriteBarrier(host); + } } void WriteBarrier::MarkingSlow(Code host, RelocInfo* reloc_info, From 3f75b580eb2fb4183a758d86ab1b4d4a1dda1f4f Mon Sep 
17 00:00:00 2001 From: Darius M Date: Wed, 4 Jan 2023 17:37:29 +0100 Subject: [PATCH 150/654] [maglev] Fix bug because of output-input aliasing Bug: v8:7700 Change-Id: Ide3704bd44b8f531720ba38127e98c00e59a7d57 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4136712 Reviewed-by: Victor Gomes Commit-Queue: Darius Mercadier Cr-Commit-Position: refs/heads/main@{#85101} --- src/maglev/arm64/maglev-ir-arm64.cc | 18 +++++++++++------- src/maglev/x64/maglev-ir-x64.cc | 8 +++++--- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index 0614d455af..c5a73de36a 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -885,7 +885,6 @@ void Int32AddWithOverflow::GenerateCode(MaglevAssembler* masm, __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this); } -// UNIMPLEMENTED_NODE(Int32SubtractWithOverflow) void Int32SubtractWithOverflow::SetValueLocationConstraints() { UseRegister(left_input()); UseRegister(right_input()); @@ -2368,7 +2367,9 @@ void TestTypeOf::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { using LiteralFlag = interpreter::TestTypeOfFlags::LiteralFlag; Register object = ToRegister(value()); - // Use return register as temporary if needed. + // Use return register as temporary if needed. Be careful: {object} and + // {scratch} could alias (which means that {object} should be considered dead + // once {scratch} has been written to). 
Register scratch = ToRegister(result()); Label is_true, is_false, done; switch (literal_) { @@ -2403,16 +2404,19 @@ void TestTypeOf::GenerateCode(MaglevAssembler* masm, __ CompareInstanceType(scratch, scratch, BIGINT_TYPE); __ B(ne, &is_false); break; - case LiteralFlag::kUndefined: + case LiteralFlag::kUndefined: { + UseScratchRegisterScope temps(masm); + Register map = temps.AcquireX(); __ JumpIfSmi(object, &is_false); // Check it has the undetectable bit set and it is not null. - __ LoadMap(scratch, object); - __ Ldr(scratch.W(), FieldMemOperand(scratch, Map::kBitFieldOffset)); - __ TestAndBranchIfAllClear( - scratch.W(), Map::Bits1::IsUndetectableBit::kMask, &is_false); + __ LoadMap(map, object); + __ Ldr(map.W(), FieldMemOperand(map, Map::kBitFieldOffset)); + __ TestAndBranchIfAllClear(map.W(), Map::Bits1::IsUndetectableBit::kMask, + &is_false); __ CompareRoot(object, RootIndex::kNullValue); __ B(eq, &is_false); break; + } case LiteralFlag::kFunction: __ JumpIfSmi(object, &is_false); // Check if callable bit is set and not undetectable. diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index 28818443f9..6d51afb627 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -2159,7 +2159,9 @@ void TestTypeOf::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { using LiteralFlag = interpreter::TestTypeOfFlags::LiteralFlag; Register object = ToRegister(value()); - // Use return register as temporary if needed. + // Use return register as temporary if needed. Be careful: {object} and {tmp} + // could alias (which means that {object} should be considered dead once {tmp} + // has been written to). Register tmp = ToRegister(result()); Label is_true, is_false, done; switch (literal_) { @@ -2199,8 +2201,8 @@ void TestTypeOf::GenerateCode(MaglevAssembler* masm, case LiteralFlag::kUndefined: __ JumpIfSmi(object, &is_false, Label::kNear); // Check it has the undetectable bit set and it is not null. 
- __ LoadMap(tmp, object); - __ testl(FieldOperand(tmp, Map::kBitFieldOffset), + __ LoadMap(kScratchRegister, object); + __ testl(FieldOperand(kScratchRegister, Map::kBitFieldOffset), Immediate(Map::Bits1::IsUndetectableBit::kMask)); __ j(zero, &is_false, Label::kNear); __ CompareRoot(object, RootIndex::kNullValue); From 071de173dca3a837b5f85714a75183fb3ae3e5c9 Mon Sep 17 00:00:00 2001 From: Shu-yu Guo Date: Wed, 4 Jan 2023 11:26:46 -0800 Subject: [PATCH 151/654] [string] Rename String::GetChars -> String::GetDirectStringChars GetChars may give the misimpression that it's usable with all flat strings, while it is only usable with direct strings. Change-Id: I1fd1ae93f75aca4079a2f65b5440a693dc2eb5c8 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4133547 Commit-Queue: Shu-yu Guo Reviewed-by: Adam Klein Auto-Submit: Shu-yu Guo Cr-Commit-Position: refs/heads/main@{#85102} --- src/heap/factory-base.cc | 8 ++++---- src/objects/string-inl.h | 8 +++++--- src/objects/string-table.cc | 3 ++- src/objects/string.cc | 3 ++- src/objects/string.h | 6 +++--- 5 files changed, 16 insertions(+), 12 deletions(-) diff --git a/src/heap/factory-base.cc b/src/heap/factory-base.cc index 3072a93285..194f809461 100644 --- a/src/heap/factory-base.cc +++ b/src/heap/factory-base.cc @@ -763,14 +763,14 @@ MaybeHandle FactoryBase::NewConsString( uint8_t* dest = result->GetChars(no_gc, access_guard); // Copy left part. { - const uint8_t* src = - left->template GetChars(isolate(), no_gc, access_guard); + const uint8_t* src = left->template GetDirectStringChars( + isolate(), no_gc, access_guard); CopyChars(dest, src, left_length); } // Copy right part. 
{ - const uint8_t* src = - right->template GetChars(isolate(), no_gc, access_guard); + const uint8_t* src = right->template GetDirectStringChars( + isolate(), no_gc, access_guard); CopyChars(dest + left_length, src, right_length); } return result; diff --git a/src/objects/string-inl.h b/src/objects/string-inl.h index 46e89cfc2c..22db7ff0f2 100644 --- a/src/objects/string-inl.h +++ b/src/objects/string-inl.h @@ -621,18 +621,20 @@ bool String::IsOneByteEqualTo(base::Vector str) { } template -const Char* String::GetChars(PtrComprCageBase cage_base, - const DisallowGarbageCollection& no_gc) const { +const Char* String::GetDirectStringChars( + PtrComprCageBase cage_base, const DisallowGarbageCollection& no_gc) const { DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*this)); + DCHECK(StringShape(*this).IsDirect()); return StringShape(*this, cage_base).IsExternal() ? CharTraits::ExternalString::cast(*this).GetChars(cage_base) : CharTraits::String::cast(*this).GetChars(no_gc); } template -const Char* String::GetChars( +const Char* String::GetDirectStringChars( PtrComprCageBase cage_base, const DisallowGarbageCollection& no_gc, const SharedStringAccessGuardIfNeeded& access_guard) const { + DCHECK(StringShape(*this).IsDirect()); return StringShape(*this, cage_base).IsExternal() ? 
CharTraits::ExternalString::cast(*this).GetChars(cage_base) : CharTraits::String::cast(*this).GetChars(no_gc, diff --git a/src/objects/string-table.cc b/src/objects/string-table.cc index 00aa76a4a2..bf0f9fceb3 100644 --- a/src/objects/string-table.cc +++ b/src/objects/string-table.cc @@ -740,7 +740,8 @@ Address StringTable::Data::TryStringToIndexOrLookupExisting(Isolate* isolate, String::WriteToFlat(source, buffer.get(), 0, length, isolate, access_guard); chars = buffer.get(); } else { - chars = source.GetChars(isolate, no_gc, access_guard) + start; + chars = + source.GetDirectStringChars(isolate, no_gc, access_guard) + start; } if (!Name::IsHashFieldComputed(raw_hash_field) || !is_source_hash_usable) { diff --git a/src/objects/string.cc b/src/objects/string.cc index 867d2e30ea..fc54c66b00 100644 --- a/src/objects/string.cc +++ b/src/objects/string.cc @@ -1680,7 +1680,8 @@ uint32_t HashString(String string, size_t start, int length, uint64_t seed, access_guard); chars = buffer.get(); } else { - chars = string.GetChars(cage_base, no_gc, access_guard) + start; + chars = string.GetDirectStringChars(cage_base, no_gc, access_guard) + + start; } return StringHasher::HashSequentialString(chars, length, seed); diff --git a/src/objects/string.h b/src/objects/string.h index 41eb67f3b1..771a2a6567 100644 --- a/src/objects/string.h +++ b/src/objects/string.h @@ -207,12 +207,12 @@ class String : public TorqueGeneratedString { // SharedStringAccessGuard is not needed (i.e. on the main thread or on // read-only strings). template - inline const Char* GetChars(PtrComprCageBase cage_base, - const DisallowGarbageCollection& no_gc) const; + inline const Char* GetDirectStringChars( + PtrComprCageBase cage_base, const DisallowGarbageCollection& no_gc) const; // Get chars from sequential or external strings. 
template - inline const Char* GetChars( + inline const Char* GetDirectStringChars( PtrComprCageBase cage_base, const DisallowGarbageCollection& no_gc, const SharedStringAccessGuardIfNeeded& access_guard) const; From a6c2b3908073f8ea0c640c205278d017933e78b8 Mon Sep 17 00:00:00 2001 From: Lu Yahan Date: Wed, 4 Jan 2023 12:23:08 +0800 Subject: [PATCH 152/654] [riscv] Remove unnecessary unbound label count The bind_to function doesn't link branch long to trampoline, so it doesn't need to add unbound_labels_count_. Change-Id: I2e3861a38eb65c285f19accb12bccb9f4c9fcfb1 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4133426 Auto-Submit: Yahan Lu Reviewed-by: ji qiu Commit-Queue: ji qiu Cr-Commit-Position: refs/heads/main@{#85103} --- src/codegen/riscv/assembler-riscv.cc | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/codegen/riscv/assembler-riscv.cc b/src/codegen/riscv/assembler-riscv.cc index 668e3fa9a8..c7773c57cd 100644 --- a/src/codegen/riscv/assembler-riscv.cc +++ b/src/codegen/riscv/assembler-riscv.cc @@ -782,10 +782,6 @@ int32_t Assembler::branch_long_offset(Label* L) { L->link_to(pc_offset()); } else { L->link_to(pc_offset()); - if (!trampoline_emitted_) { - unbound_labels_count_++; - next_buffer_check_ -= kTrampolineSlotsSize; - } DEBUG_PRINTF("\tstarted link\n"); return kEndOfJumpChain; } From 2bb36a227559645f8e71dd645dde7f0d492297a2 Mon Sep 17 00:00:00 2001 From: Lu Yahan Date: Wed, 4 Jan 2023 18:11:52 +0800 Subject: [PATCH 153/654] [riscv] Fix disasm error about fcvt.s.d Change-Id: I1046f5d7147a032b6f7c830c4ae3235bc9f55088 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4134468 Commit-Queue: ji qiu Auto-Submit: Yahan Lu Reviewed-by: ji qiu Cr-Commit-Position: refs/heads/main@{#85104} --- src/diagnostics/riscv/disasm-riscv.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/diagnostics/riscv/disasm-riscv.cc b/src/diagnostics/riscv/disasm-riscv.cc index 1119a38014..0448413498 100644 --- 
a/src/diagnostics/riscv/disasm-riscv.cc +++ b/src/diagnostics/riscv/disasm-riscv.cc @@ -1269,7 +1269,7 @@ void Decoder::DecodeRFPType(Instruction* instr) { } case (RO_FCVT_S_D & kRFPTypeMask): { if (instr->Rs2Value() == 0b00001) { - Format(instr, "fcvt.s.d ['frm] 'fd, 'rs1"); + Format(instr, "fcvt.s.d ['frm] 'fd, 'fs1"); } else { UNSUPPORTED_RISCV(); } From 70253ba04e353ed7dcca6662635590eb7a3273ea Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Wed, 4 Jan 2023 19:27:21 -0800 Subject: [PATCH 154/654] Update V8 DEPS (trusted) Rolling v8/build: https://chromium.googlesource.com/chromium/src/build/+log/44b5138..33bb56b Rolling v8/third_party/catapult: https://chromium.googlesource.com/catapult/+log/a404e6d..038b25e Rolling v8/third_party/depot_tools: https://chromium.googlesource.com/chromium/tools/depot_tools/+log/252b198..58a343c Rolling v8/third_party/fuchsia-sdk/sdk: version:11.20230103.1.1..version:11.20230104.1.1 Rolling v8/third_party/zlib: https://chromium.googlesource.com/chromium/src/third_party/zlib/+log/18d27fa..fa5dc47 Change-Id: I49c9e11b32c782a4f0cb29b1559f708549d6e8bb Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4133999 Bot-Commit: v8-ci-autoroll-builder Commit-Queue: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#85105} --- DEPS | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/DEPS b/DEPS index a8257c66b2..eb10fbc34b 100644 --- a/DEPS +++ b/DEPS @@ -65,7 +65,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. 
- 'fuchsia_version': 'version:11.20230103.1.1', + 'fuchsia_version': 'version:11.20230104.1.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -105,7 +105,7 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '68e6038b5350cba18c341cc7c572170af5c5b20c', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '44b5138cf5012ca0e661db0ff0f723757ff6b2d6', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '33bb56b5658abbf77eb54898fb66ee5df3450723', 'buildtools': Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'cf8d11e41138ca777f3eaa09df41bf968c8be6ba', 'buildtools/clang_format/script': @@ -209,7 +209,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + 'a404e6d108a230c0c4080a71705c6e6d7c30557a', + 'url': Var('chromium_url') + '/catapult.git' + '@' + '038b25e395cafe0dc95b8139295eab6441315cc8', 'condition': 'checkout_android', }, 'third_party/colorama/src': { @@ -217,7 +217,7 @@ deps = { 'condition': 'checkout_android', }, 'third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '252b19866a6a9f3de069363184e5fca72280e558', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '58a343c88bda7ef0b65ad9bdf208b9307446dfbe', 'third_party/fuchsia-sdk/sdk': { 'packages': [ { @@ -270,7 +270,7 @@ deps = { 'condition': 'checkout_android', }, 'third_party/zlib': - Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '18d27fa10b237fdfcbd8f0c65c19fe009981a3bc', + Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'fa5dc4750029333c2486efa38eec7d13890108ed', 'tools/clang': Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '3b54a131a94f1345579c9d92b08c2b45c43cfe77', 'tools/luci-go': { From 5b8d62d8303065fa8b4e3208a1f1bb2d530913e3 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Simon=20Z=C3=BCnd?= Date: Wed, 4 Jan 2023 11:35:31 +0100 Subject: [PATCH 155/654] [debug] Only 'step-in' on function entry for scheduled pauses MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This CL fixes a bug where we wouldn't pause (or even crash) when trying to interrupt an infinite loop. When we pause via stack check (i.e. a scheduled break) we currently do one additional step-in. We do so to enter functions properly in case we are paused in the middle of setting up the stack frame. Loops also do a stack check, to support pausing infinite loops. In that case we can skip the additional step-in as we are already in a valid pause position (as implemented by this CL). This CL also removes two bogus DCHECKs. We assumed that a scheduled break never happens after a step. This is wrong, e.g. a user can click the pause button after stepping over a long running function. Note that we duplicate the various loop interruption cctests to also interrupt the loops with the "scheduled" break reason. Without the changes in debug.cc, those won't pass. The CL https://crrev.com/c/4136058 adds a regression test on the blink side. 
R=jarin@chromium.org Fixed: chromium:1401674 Change-Id: I42b44744b17d24351f01b83c0446908c24e6c5fd Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4134246 Commit-Queue: Simon Zünd Reviewed-by: Jaroslav Sevcik Cr-Commit-Position: refs/heads/main@{#85106} --- src/debug/debug.cc | 11 +++++++---- src/debug/debug.h | 1 + test/cctest/test-debug.cc | 9 ++++++++- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/src/debug/debug.cc b/src/debug/debug.cc index 505817a552..eecac00120 100644 --- a/src/debug/debug.cc +++ b/src/debug/debug.cc @@ -119,6 +119,11 @@ BreakLocation BreakLocation::FromFrame(Handle debug_info, return it.GetBreakLocation(); } +bool BreakLocation::IsPausedInJsFunctionEntry(JavaScriptFrame* frame) { + auto summary = FrameSummary::GetTop(frame); + return summary.code_offset() == kFunctionEntryBytecodeOffset; +} + MaybeHandle Debug::CheckBreakPointsForLocations( Handle debug_info, std::vector& break_locations, bool* has_break_points) { @@ -545,8 +550,6 @@ void Debug::Break(JavaScriptFrame* frame, Handle break_target) { if (!break_points_hit.is_null() || break_on_next_function_call() || scheduled_break) { StepAction lastStepAction = last_step_action(); - DCHECK_IMPLIES(scheduled_break_on_function_call(), - lastStepAction == StepNone); debug::BreakReasons break_reasons; if (scheduled_break) { break_reasons.Add(debug::BreakReason::kScheduled); @@ -2580,8 +2583,8 @@ void Debug::HandleDebugBreak(IgnoreBreakMode ignore_break_mode, // it's context. Instead, we step into the function and pause at the // first official breakable position. // This behavior mirrors "BreakOnNextFunctionCall". 
- if (break_reasons.contains(v8::debug::BreakReason::kScheduled)) { - CHECK_EQ(last_step_action(), StepAction::StepNone); + if (break_reasons.contains(v8::debug::BreakReason::kScheduled) && + BreakLocation::IsPausedInJsFunctionEntry(frame)) { thread_local_.scheduled_break_on_next_function_call_ = true; PrepareStepIn(function); return; diff --git a/src/debug/debug.h b/src/debug/debug.h index 3be05bbd94..c717dc4536 100644 --- a/src/debug/debug.h +++ b/src/debug/debug.h @@ -69,6 +69,7 @@ class BreakLocation { static BreakLocation Invalid() { return BreakLocation(-1, NOT_DEBUG_BREAK); } static BreakLocation FromFrame(Handle debug_info, JavaScriptFrame* frame); + static bool IsPausedInJsFunctionEntry(JavaScriptFrame* frame); static void AllAtCurrentStatement(Handle debug_info, JavaScriptFrame* frame, diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc index d856eb9775..8ac4083f67 100644 --- a/test/cctest/test-debug.cc +++ b/test/cctest/test-debug.cc @@ -255,8 +255,9 @@ class DebugEventBreak : public v8::debug::DebugDelegate { } }; +v8::debug::BreakReasons break_right_now_reasons = {}; static void BreakRightNow(v8::Isolate* isolate, void*) { - v8::debug::BreakRightNow(isolate); + v8::debug::BreakRightNow(isolate, break_right_now_reasons); } // Debug event handler which re-issues a debug break until a limit has been @@ -3830,6 +3831,12 @@ void DebugBreakLoop(const char* loop_header, const char** loop_bodies, TestDebugBreakInLoop(loop_header, loop_bodies, loop_footer); + // Also test with "Scheduled" break reason. + break_right_now_reasons = + v8::debug::BreakReasons{v8::debug::BreakReason::kScheduled}; + TestDebugBreakInLoop(loop_header, loop_bodies, loop_footer); + break_right_now_reasons = v8::debug::BreakReasons{}; + // Get rid of the debug event listener. 
v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr); CheckDebuggerUnloaded(); From 3094c4002b512505e866f14ce4f9f215f8b9cecd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Simon=20Z=C3=BCnd?= Date: Wed, 4 Jan 2023 13:56:20 +0100 Subject: [PATCH 156/654] [debug] Fix stepping through single statement loops MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The debugger utilizes the source position while single stepping ("Step-in") through the source to go from statement to statement and skipping some expressions along the way. The debugger remembers the "statement position" of the last stepping action. This works well in general but falls flat for loops that only have a single statement in them. Every step lands on the same statement, just one loop iteration later. We detect this case by checking if we are in the same frame and have the exact same bytecode offset as the last step action. Note that this also fixes "frame restarting" should we have restarted a function while paused at the beginning of that function. 
R=jarin@chromium.org Bug: chromium:1401674 Change-Id: Id0a5753ed7cc9f23f22d869368d88e1c4b48566d Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4135881 Commit-Queue: Simon Zünd Reviewed-by: Jaroslav Sevcik Cr-Commit-Position: refs/heads/main@{#85107} --- src/debug/debug.cc | 23 ++++++++--- src/debug/debug.h | 3 ++ .../set-script-source-top-frame-expected.txt | 2 +- .../regress-crbug-1401674-expected.txt | 5 +++ .../regress/regress-crbug-1401674.js | 38 +++++++++++++++++++ 5 files changed, 64 insertions(+), 7 deletions(-) create mode 100644 test/inspector/regress/regress-crbug-1401674-expected.txt create mode 100644 test/inspector/regress/regress-crbug-1401674.js diff --git a/src/debug/debug.cc b/src/debug/debug.cc index eecac00120..fc9a3881f4 100644 --- a/src/debug/debug.cc +++ b/src/debug/debug.cc @@ -390,6 +390,7 @@ void Debug::ThreadInit() { thread_local_.break_frame_id_ = StackFrameId::NO_ID; thread_local_.last_step_action_ = StepNone; thread_local_.last_statement_position_ = kNoSourcePosition; + thread_local_.last_bytecode_offset_ = kFunctionEntryBytecodeOffset; thread_local_.last_frame_count_ = -1; thread_local_.fast_forward_to_return_ = false; thread_local_.ignore_step_into_function_ = Smi::zero(); @@ -621,10 +622,19 @@ void Debug::Break(JavaScriptFrame* frame, Handle break_target) { return; } FrameSummary summary = FrameSummary::GetTop(frame); + const bool frame_or_statement_changed = + current_frame_count != last_frame_count || + thread_local_.last_statement_position_ != + summary.SourceStatementPosition(); + // If we stayed on the same frame and reached the same bytecode offset + // since the last step, we are in a loop and should pause. Otherwise + // we keep "stepping" through the loop without ever actually pausing. 
+ const bool potential_single_statement_loop = + current_frame_count == last_frame_count && + thread_local_.last_bytecode_offset_ == summary.code_offset(); step_break = step_break || location.IsReturn() || - current_frame_count != last_frame_count || - thread_local_.last_statement_position_ != - summary.SourceStatementPosition(); + potential_single_statement_loop || + frame_or_statement_changed; break; } } @@ -1268,9 +1278,8 @@ void Debug::PrepareStep(StepAction step_action) { // A step-next in blackboxed function is a step-out. if (step_action == StepOver && IsBlackboxed(shared)) step_action = StepOut; - thread_local_.last_statement_position_ = - summary.abstract_code()->SourceStatementPosition(isolate_, - summary.code_offset()); + thread_local_.last_statement_position_ = summary.SourceStatementPosition(); + thread_local_.last_bytecode_offset_ = summary.code_offset(); thread_local_.last_frame_count_ = current_frame_count; // No longer perform the current async step. clear_suspended_generator(); @@ -1297,6 +1306,7 @@ void Debug::PrepareStep(StepAction step_action) { case StepOut: { // Clear last position info. For stepping out it does not matter. 
thread_local_.last_statement_position_ = kNoSourcePosition; + thread_local_.last_bytecode_offset_ = kFunctionEntryBytecodeOffset; thread_local_.last_frame_count_ = -1; if (!shared.is_null()) { if (!location.IsReturnOrSuspend() && !IsBlackboxed(shared)) { @@ -1412,6 +1422,7 @@ void Debug::ClearStepping() { thread_local_.last_step_action_ = StepNone; thread_local_.last_statement_position_ = kNoSourcePosition; + thread_local_.last_bytecode_offset_ = kFunctionEntryBytecodeOffset; thread_local_.ignore_step_into_function_ = Smi::zero(); thread_local_.fast_forward_to_return_ = false; thread_local_.last_frame_count_ = -1; diff --git a/src/debug/debug.h b/src/debug/debug.h index c717dc4536..cf49ce66e8 100644 --- a/src/debug/debug.h +++ b/src/debug/debug.h @@ -577,6 +577,9 @@ class V8_EXPORT_PRIVATE Debug { // Source statement position from last step next action. int last_statement_position_; + // Bytecode offset from last step next action. + int last_bytecode_offset_; + // Frame pointer from last step next or step frame action. int last_frame_count_; diff --git a/test/inspector/debugger/set-script-source-top-frame-expected.txt b/test/inspector/debugger/set-script-source-top-frame-expected.txt index 8f39a070fd..88a6cde245 100644 --- a/test/inspector/debugger/set-script-source-top-frame-expected.txt +++ b/test/inspector/debugger/set-script-source-top-frame-expected.txt @@ -6,7 +6,7 @@ function testExpression(a, b) { Paused at (after live edit): function testExpression(a, b) { - return a * b;# + #return a * b; } Result: diff --git a/test/inspector/regress/regress-crbug-1401674-expected.txt b/test/inspector/regress/regress-crbug-1401674-expected.txt new file mode 100644 index 0000000000..fe03f46744 --- /dev/null +++ b/test/inspector/regress/regress-crbug-1401674-expected.txt @@ -0,0 +1,5 @@ +Regression test for crbug.com/1401674. Properly step through single statement loops. +Expecting debugger to pause after the step ... 
+SUCCESS +Stepping to the same statement but in the next iteration ... +SUCCESS diff --git a/test/inspector/regress/regress-crbug-1401674.js b/test/inspector/regress/regress-crbug-1401674.js new file mode 100644 index 0000000000..986a837109 --- /dev/null +++ b/test/inspector/regress/regress-crbug-1401674.js @@ -0,0 +1,38 @@ +// Copyright 2023 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +let {Protocol} = InspectorTest.start('Regression test for crbug.com/1401674. Properly step through single statement loops.'); + +(async () => { + await Protocol.Debugger.enable(); + + Protocol.Runtime.evaluate({ + expression: ` + function f() { + let i = 0; + debugger; + while (true) {i++} + } + + f(); + `}); + + await Protocol.Debugger.oncePaused(); + + Protocol.Debugger.stepInto(); + await Protocol.Debugger.oncePaused(); + + InspectorTest.log('Expecting debugger to pause after the step ...'); + Protocol.Debugger.stepInto(); + await Protocol.Debugger.oncePaused(); + + InspectorTest.log('SUCCESS'); + InspectorTest.log('Stepping to the same statement but in the next iteration ...'); + + Protocol.Debugger.stepInto(); + await Protocol.Debugger.oncePaused(); + + InspectorTest.log('SUCCESS'); + InspectorTest.completeTest(); +})(); From fcae4c1383589b5e29b8ae6450be82c06183ee10 Mon Sep 17 00:00:00 2001 From: Yahan Lu Date: Thu, 5 Jan 2023 07:35:21 +0000 Subject: [PATCH 157/654] Revert "[riscv] Remove unnecessary unbound label count" This reverts commit a6c2b3908073f8ea0c640c205278d017933e78b8. Reason for revert: Failed tests Original change's description: > [riscv] Remove unnecessary unbound label count > > The bind_to function doesn't link branch long to trampoline, so it doesn't need to add unbound_labels_count_. 
> > Change-Id: I2e3861a38eb65c285f19accb12bccb9f4c9fcfb1 > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4133426 > Auto-Submit: Yahan Lu > Reviewed-by: ji qiu > Commit-Queue: ji qiu > Cr-Commit-Position: refs/heads/main@{#85103} Change-Id: I651762d71a8e86bbe76a10224a63433cdacfadfe No-Presubmit: true No-Tree-Checks: true No-Try: true Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4136999 Bot-Commit: Rubber Stamper Auto-Submit: Yahan Lu Commit-Queue: Rubber Stamper Cr-Commit-Position: refs/heads/main@{#85108} --- src/codegen/riscv/assembler-riscv.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/codegen/riscv/assembler-riscv.cc b/src/codegen/riscv/assembler-riscv.cc index c7773c57cd..668e3fa9a8 100644 --- a/src/codegen/riscv/assembler-riscv.cc +++ b/src/codegen/riscv/assembler-riscv.cc @@ -782,6 +782,10 @@ int32_t Assembler::branch_long_offset(Label* L) { L->link_to(pc_offset()); } else { L->link_to(pc_offset()); + if (!trampoline_emitted_) { + unbound_labels_count_++; + next_buffer_check_ -= kTrampolineSlotsSize; + } DEBUG_PRINTF("\tstarted link\n"); return kEndOfJumpChain; } From d333e5b5aa1265696b1e2bd73af3ddb11fd87b24 Mon Sep 17 00:00:00 2001 From: Lu Yahan Date: Thu, 5 Jan 2023 15:38:30 +0800 Subject: [PATCH 158/654] [riscv] Fix disasm unittest error Change-Id: I5e342abad192189fc88aae185901ba776643c0dc Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4134473 Reviewed-by: ji qiu Auto-Submit: Yahan Lu Commit-Queue: ji qiu Cr-Commit-Position: refs/heads/main@{#85109} --- test/unittests/assembler/disasm-riscv-unittest.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/unittests/assembler/disasm-riscv-unittest.cc b/test/unittests/assembler/disasm-riscv-unittest.cc index 7bc9ca42cb..7bff5b9925 100644 --- a/test/unittests/assembler/disasm-riscv-unittest.cc +++ b/test/unittests/assembler/disasm-riscv-unittest.cc @@ -378,7 +378,7 @@ TEST_F(DisasmRiscv64Test, RV32D) { COMPARE(fsgnjx_d(ft0, 
ft8, fa5), "22fe2053 fsgnjx.d ft0, ft8, fa5"); COMPARE(fmin_d(ft0, ft8, fa5), "2afe0053 fmin.d ft0, ft8, fa5"); COMPARE(fmax_d(ft0, ft8, fa5), "2afe1053 fmax.d ft0, ft8, fa5"); - COMPARE(fcvt_s_d(ft0, ft8, RDN), "401e2053 fcvt.s.d [RDN] ft0, t3"); + COMPARE(fcvt_s_d(ft0, ft8, RDN), "401e2053 fcvt.s.d [RDN] ft0, ft8"); COMPARE(fcvt_d_s(ft0, fa0), "42050053 fcvt.d.s ft0, fa0"); COMPARE(feq_d(a0, ft8, fa5), "a2fe2553 feq.d a0, ft8, fa5"); COMPARE(flt_d(a0, ft8, fa5), "a2fe1553 flt.d a0, ft8, fa5"); From ca3a939da83b87453d8c59d8acced5ac6452ef50 Mon Sep 17 00:00:00 2001 From: Vladimir Nechaev Date: Wed, 4 Jan 2023 18:14:42 +0000 Subject: [PATCH 159/654] [inspector] Provide more details about destroyed context MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Runtime.executionContextCreated provides many details in ExecutionContextDescription structure while Runtime.executionContextDestroyed provides only executionContextId. This information is insufficient for the clients that use uniqueContextId. 
Bug: v8:12896 Change-Id: I31df0ed618dc1c8b55c7eba8f96eeaef2d4de6c8 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3657439 Commit-Queue: Simon Zünd Auto-Submit: Vladimir Nechaev Reviewed-by: Simon Zünd Cr-Commit-Position: refs/heads/main@{#85110} --- include/js_protocol.pdl | 4 +++- src/inspector/v8-runtime-agent-impl.cc | 3 ++- test/inspector/protocol-test.js | 3 ++- .../context-destroyed-on-context-collected-expected.txt | 1 + test/inspector/sessions/create-session-expected.txt | 3 +++ 5 files changed, 11 insertions(+), 3 deletions(-) diff --git a/include/js_protocol.pdl b/include/js_protocol.pdl index 7960a56f54..d4102f5c6c 100644 --- a/include/js_protocol.pdl +++ b/include/js_protocol.pdl @@ -1741,7 +1741,9 @@ domain Runtime event executionContextDestroyed parameters # Id of the destroyed context - ExecutionContextId executionContextId + deprecated ExecutionContextId executionContextId + # Unique Id of the destroyed context + experimental string executionContextUniqueId # Issued when all executionContexts were cleared in browser event executionContextsCleared diff --git a/src/inspector/v8-runtime-agent-impl.cc b/src/inspector/v8-runtime-agent-impl.cc index 5cb582c0bf..9a1a845487 100644 --- a/src/inspector/v8-runtime-agent-impl.cc +++ b/src/inspector/v8-runtime-agent-impl.cc @@ -997,7 +997,8 @@ void V8RuntimeAgentImpl::reportExecutionContextDestroyed( InspectedContext* context) { if (m_enabled && context->isReported(m_session->sessionId())) { context->setReported(m_session->sessionId(), false); - m_frontend.executionContextDestroyed(context->contextId()); + m_frontend.executionContextDestroyed(context->contextId(), + context->uniqueId().toString()); } } diff --git a/test/inspector/protocol-test.js b/test/inspector/protocol-test.js index d45ea22baf..a5e9505b82 100644 --- a/test/inspector/protocol-test.js +++ b/test/inspector/protocol-test.js @@ -36,7 +36,8 @@ InspectorTest.logMessage = function(originalMessage) { const nonStableFields = new Set([ 
'objectId', 'scriptId', 'exceptionId', 'timestamp', 'executionContextId', 'callFrameId', 'breakpointId', 'bindRemoteObjectFunctionId', - 'formatterObjectId', 'debuggerId', 'bodyGetterId', 'uniqueId' + 'formatterObjectId', 'debuggerId', 'bodyGetterId', 'uniqueId', + 'executionContextUniqueId' ]); const message = JSON.parse(JSON.stringify(originalMessage, replacer.bind(null, Symbol(), nonStableFields))); if (message.id) diff --git a/test/inspector/runtime/context-destroyed-on-context-collected-expected.txt b/test/inspector/runtime/context-destroyed-on-context-collected-expected.txt index 9a5e1708c1..381cf88c2e 100644 --- a/test/inspector/runtime/context-destroyed-on-context-collected-expected.txt +++ b/test/inspector/runtime/context-destroyed-on-context-collected-expected.txt @@ -3,5 +3,6 @@ Tests that contextDesrtoyed nofitication is fired when context is collected. method : Runtime.executionContextDestroyed params : { executionContextId : + executionContextUniqueId : } } diff --git a/test/inspector/sessions/create-session-expected.txt b/test/inspector/sessions/create-session-expected.txt index 4459f4d19c..b8f9c5f828 100644 --- a/test/inspector/sessions/create-session-expected.txt +++ b/test/inspector/sessions/create-session-expected.txt @@ -70,6 +70,7 @@ From session 2 method : Runtime.executionContextDestroyed params : { executionContextId : + executionContextUniqueId : } } id matching: true @@ -78,6 +79,7 @@ From session 1 method : Runtime.executionContextDestroyed params : { executionContextId : + executionContextUniqueId : } } id matching: true @@ -86,6 +88,7 @@ From session 3 method : Runtime.executionContextDestroyed params : { executionContextId : + executionContextUniqueId : } } id matching: true From 43fd63554efe6f92f59649805ffe1bcd1ea66812 Mon Sep 17 00:00:00 2001 From: Michael Achenbach Date: Tue, 3 Jan 2023 21:32:02 +0100 Subject: [PATCH 160/654] [gcmole] Add regression test with multiple safepoints Bug: v8:13536 Change-Id: 
I1cac6a34b6948f7e5365c5454ad6d3f928d906d4 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4134164 Commit-Queue: Michael Achenbach Reviewed-by: Leszek Swirski Cr-Commit-Position: refs/heads/main@{#85111} --- tools/gcmole/gcmole-test.cc | 10 ++++++++++ tools/gcmole/test-expectations.txt | 11 ++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/tools/gcmole/gcmole-test.cc b/tools/gcmole/gcmole-test.cc index 038a514189..39d7ce1e15 100644 --- a/tools/gcmole/gcmole-test.cc +++ b/tools/gcmole/gcmole-test.cc @@ -349,5 +349,15 @@ void TestGuardedDeadVarAnalysisMidFunction2(Isolate* isolate) { raw_obj.Print(); } +void TestGuardedDeadVarAnalysisMultipleSafepoints(Isolate* isolate) { + // TODO(https://crbug.com/v8/13536): The analysis points to this safepoint, + // while it should point to the one below. + Safepoint(); + JSObject raw_obj = *isolate->factory()->NewJSObjectWithNullProto(); + DisallowGarbageCollection no_gc; + Safepoint(); + raw_obj.Print(); +} + } // namespace internal } // namespace v8 diff --git a/tools/gcmole/test-expectations.txt b/tools/gcmole/test-expectations.txt index d057fa4b1e..fa3e4b9e28 100644 --- a/tools/gcmole/test-expectations.txt +++ b/tools/gcmole/test-expectations.txt @@ -214,4 +214,13 @@ tools/gcmole/gcmole-test.cc:345:3: note: Call might cause unexpected GC. tools/gcmole/gcmole-test.cc:27:1: note: GC call here. Object CauseGCRaw(Object obj, Isolate* isolate) { ^ -24 warnings generated. +tools/gcmole/gcmole-test.cc:359:3: warning: Possibly stale variable due to GCs. + raw_obj.Print(); + ^ +tools/gcmole/gcmole-test.cc:355:3: note: Call might cause unexpected GC. + Safepoint(); + ^ +tools/gcmole/gcmole-test.cc:19:1: note: GC call here. +void Safepoint() { LocalHeap::Current()->Safepoint(); } +^ +25 warnings generated. 
From 73aaf18f362718a14b701ef6cbb095cc2a1a2b2c Mon Sep 17 00:00:00 2001 From: Michael Achenbach Date: Thu, 5 Jan 2023 11:23:42 +0100 Subject: [PATCH 161/654] [gcmole] Remove legacy gcmole steps This was running side-by-side in production now for >1 month. Now we remove the sequential gcmole step and only keep running the parallel version. We keep the sequential test run to ensure it keeps working for developers who still use this locally. Bug: v8:12660 Change-Id: If92516948d0cc3c03c9a4a18bd216ce63c18dfc3 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4136727 Reviewed-by: Leszek Swirski Auto-Submit: Michael Achenbach Commit-Queue: Leszek Swirski Cr-Commit-Position: refs/heads/main@{#85112} --- infra/testing/builders.pyl | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/infra/testing/builders.pyl b/infra/testing/builders.pyl index 2a9a986bc2..ab9ccf0c89 100644 --- a/infra/testing/builders.pyl +++ b/infra/testing/builders.pyl @@ -180,11 +180,6 @@ ], 'shards': 4, }, - {'name': 'gcmole_v2', 'variant': 'ia32'}, - {'name': 'gcmole_v2', 'variant': 'x64'}, - {'name': 'gcmole_v2', 'variant': 'arm'}, - {'name': 'gcmole_v2', 'variant': 'arm64'}, - # TODO(https://crbug.com/v8/12660): Remove v2 above after testing. {'name': 'gcmole_v3', 'variant': 'ia32', 'shards': 4}, {'name': 'gcmole_v3', 'variant': 'x64', 'shards': 4}, {'name': 'gcmole_v3', 'variant': 'arm', 'shards': 4}, @@ -1099,11 +1094,6 @@ 'test_args': ['--extra-flags', '--noenable-avx'], 'shards': 2 }, - {'name': 'gcmole_v2', 'variant': 'ia32'}, - {'name': 'gcmole_v2', 'variant': 'x64'}, - {'name': 'gcmole_v2', 'variant': 'arm'}, - {'name': 'gcmole_v2', 'variant': 'arm64'}, - # TODO(https://crbug.com/v8/12660): Remove v2 above after testing. 
{'name': 'gcmole_v3', 'variant': 'ia32', 'shards': 4}, {'name': 'gcmole_v3', 'variant': 'x64', 'shards': 4}, {'name': 'gcmole_v3', 'variant': 'arm', 'shards': 4}, From 59136c60454fc639d097b2b9a7518c0ced759aaf Mon Sep 17 00:00:00 2001 From: Victor Gomes Date: Thu, 5 Jan 2023 11:20:46 +0100 Subject: [PATCH 162/654] [log] Remove is_listening_to_code_events cache V8FileLogger has a dynamic behaviour when listening to code events, i.e., it can stop listening without removing itself from the Logger, which invalidates the field is_listening_to_code_events_. This field is only updated when adding/removing an event listener. This cache was recently introduced in a refactoring https://crrev.com/c/3582125 Bug: chromium:1400809 Change-Id: If93c88a6a64f5bf2c10265ac1db455ea498733a0 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4136726 Auto-Submit: Victor Gomes Commit-Queue: Patrick Thier Reviewed-by: Patrick Thier Cr-Commit-Position: refs/heads/main@{#85113} --- src/logging/code-events.h | 22 ++++------------------ test/mjsunit/regress-1400809.js | 11 +++++++++++ 2 files changed, 15 insertions(+), 18 deletions(-) create mode 100644 test/mjsunit/regress-1400809.js diff --git a/src/logging/code-events.h b/src/logging/code-events.h index 01d1c5ea5e..cb815cc022 100644 --- a/src/logging/code-events.h +++ b/src/logging/code-events.h @@ -124,10 +124,6 @@ class Logger { if (position != listeners_.end()) return false; // Add the listener to the end and update the element listeners_.push_back(listener); - if (!_is_listening_to_code_events) { - _is_listening_to_code_events |= listener->is_listening_to_code_events(); - } - DCHECK_EQ(_is_listening_to_code_events, IsListeningToCodeEvents()); return true; } void RemoveListener(LogEventListener* listener) { @@ -135,15 +131,13 @@ class Logger { auto position = std::find(listeners_.begin(), listeners_.end(), listener); if (position == listeners_.end()) return; listeners_.erase(position); - if 
(listener->is_listening_to_code_events()) { - _is_listening_to_code_events = IsListeningToCodeEvents(); - } - DCHECK_EQ(_is_listening_to_code_events, IsListeningToCodeEvents()); } bool is_listening_to_code_events() const { - DCHECK_EQ(_is_listening_to_code_events, IsListeningToCodeEvents()); - return _is_listening_to_code_events; + for (auto listener : listeners_) { + if (listener->is_listening_to_code_events()) return true; + } + return false; } void CodeCreateEvent(CodeTag tag, Handle code, @@ -264,16 +258,8 @@ class Logger { } private: - bool IsListeningToCodeEvents() const { - for (auto listener : listeners_) { - if (listener->is_listening_to_code_events()) return true; - } - return false; - } - std::vector listeners_; base::Mutex mutex_; - bool _is_listening_to_code_events = false; }; } // namespace internal diff --git a/test/mjsunit/regress-1400809.js b/test/mjsunit/regress-1400809.js new file mode 100644 index 0000000000..4b559fdca1 --- /dev/null +++ b/test/mjsunit/regress-1400809.js @@ -0,0 +1,11 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --expose-gc --logfile='+' --log + +const log = d8.log.getAndStop(); +gc(); +function __f_6() { +} +__v_1 = __f_6(); From 68047ec37fa5716528656a51927dda885691ff4a Mon Sep 17 00:00:00 2001 From: Clemens Backes Date: Wed, 4 Jan 2023 17:20:51 +0100 Subject: [PATCH 163/654] [wasm][streaming] Avoid UAF after context disposal After a call to {StreamingDecoder::NotifyCompilationEnded}, no method on the {StreamingProcessor} should be called any more. We were still calling the {OnAbort} method later. To make the semantics a bit more clear, we rename {NotifyCompilationEnded} to {NotifyCompilationDiscarded}. We also remove the {stream_finished_} field and reset the processor instead, which will result in a nullptr access if we try to illegally call any further methods. 
R=ahaas@chromium.org Bug: chromium:1403531, chromium:1399790, chromium:1400066 Change-Id: I4caef3801dfe9d653125efbd7bc9b5d13ce30dc7 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4132966 Reviewed-by: Andreas Haas Commit-Queue: Clemens Backes Cr-Commit-Position: refs/heads/main@{#85114} --- src/wasm/module-compiler.cc | 20 +++++---------- src/wasm/streaming-decoder.cc | 40 ++++++++++++++++++------------ src/wasm/streaming-decoder.h | 4 +-- src/wasm/sync-streaming-decoder.cc | 2 +- src/wasm/wasm-js.cc | 13 ++++++++++ src/wasm/wasm-js.h | 14 ++++++++--- test/cctest/cctest.status | 4 +-- test/cctest/test-api.cc | 38 ++++++++++++++++++++++++++-- 8 files changed, 94 insertions(+), 41 deletions(-) diff --git a/src/wasm/module-compiler.cc b/src/wasm/module-compiler.cc index 63468aee4b..d817ff931e 100644 --- a/src/wasm/module-compiler.cc +++ b/src/wasm/module-compiler.cc @@ -2131,9 +2131,7 @@ AsyncCompileJob::~AsyncCompileJob() { } // Tell the streaming decoder that the AsyncCompileJob is not available // anymore. - // TODO(ahaas): Is this notification really necessary? Check - // https://crbug.com/888170. - if (stream_) stream_->NotifyCompilationEnded(); + if (stream_) stream_->NotifyCompilationDiscarded(); CancelPendingForegroundTask(); isolate_->global_handles()->Destroy(native_context_.location()); isolate_->global_handles()->Destroy(incumbent_context_.location()); @@ -2815,9 +2813,6 @@ void AsyncStreamingProcessor::OnFinishedStream( if (validate_functions_job_data_.found_error) after_error = true; } - // Check that we did not abort or finish the job before. - CHECK(job_); - job_->wire_bytes_ = ModuleWireBytes(bytes.as_vector()); job_->bytes_copy_ = std::move(bytes); @@ -2836,8 +2831,8 @@ void AsyncStreamingProcessor::OnFinishedStream( // Clean up the temporary cache entry. GetWasmEngine()->StreamingCompilationFailed(prefix_hash_); } + // Calling {Failed} will invalidate the {AsyncCompileJob} and delete {this}. 
job_->Failed(); - job_ = nullptr; return; } @@ -2906,14 +2901,13 @@ void AsyncStreamingProcessor::OnFinishedStream( failed, std::move(job_->native_module_), job_->isolate_); cache_hit = prev_native_module != job_->native_module_.get(); } + // We finally call {Failed} or {FinishCompile}, which will invalidate the + // {AsyncCompileJob} and delete {this}. if (failed) { job_->Failed(); } else { job_->FinishCompile(cache_hit); } - // Calling either {Failed} or {FinishCompile} will invalidate the - // {AsyncCompileJob}. - job_ = nullptr; } } @@ -2927,9 +2921,8 @@ void AsyncStreamingProcessor::OnAbort() { // Clean up the temporary cache entry. GetWasmEngine()->StreamingCompilationFailed(prefix_hash_); } - // {Abort} invalidates the {AsyncCompileJob}. + // {Abort} invalidates the {AsyncCompileJob}, which in turn deletes {this}. job_->Abort(); - job_ = nullptr; } bool AsyncStreamingProcessor::Deserialize( @@ -2956,9 +2949,8 @@ bool AsyncStreamingProcessor::Deserialize( job_->isolate_->global_handles()->Create(*result.ToHandleChecked()); job_->native_module_ = job_->module_object_->shared_native_module(); job_->wire_bytes_ = ModuleWireBytes(job_->native_module_->wire_bytes()); + // Calling {FinishCompile} deletes the {AsyncCompileJob} and {this}. job_->FinishCompile(false); - // Calling {FinishCompile} invalidates the {AsyncCompileJob}. - job_ = nullptr; return true; } diff --git a/src/wasm/streaming-decoder.cc b/src/wasm/streaming-decoder.cc index 218c91698c..5af23d1dbc 100644 --- a/src/wasm/streaming-decoder.cc +++ b/src/wasm/streaming-decoder.cc @@ -28,16 +28,18 @@ class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder { AsyncStreamingDecoder(const AsyncStreamingDecoder&) = delete; AsyncStreamingDecoder& operator=(const AsyncStreamingDecoder&) = delete; - // The buffer passed into OnBytesReceived is owned by the caller. 
void OnBytesReceived(base::Vector bytes) override; void Finish(bool can_use_compiled_module) override; void Abort() override; - // Notify the StreamingDecoder that compilation ended and the - // StreamingProcessor should not be called anymore. - void NotifyCompilationEnded() override { Fail(); } + void NotifyCompilationDiscarded() override { + auto& active_processor = processor_ ? processor_ : failed_processor_; + active_processor.reset(); + DCHECK_NULL(processor_); + DCHECK_NULL(failed_processor_); + } void NotifyNativeModuleCreated( const std::shared_ptr& native_module) override; @@ -194,6 +196,8 @@ class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder { } void Fail() { + // {Fail} cannot be called after {Finish}, {Abort}, {Fail}, or + // {NotifyCompilationDiscarded}. DCHECK_EQ(processor_ == nullptr, failed_processor_ != nullptr); if (processor_ != nullptr) failed_processor_ = std::move(processor_); DCHECK_NULL(processor_); @@ -209,14 +213,14 @@ class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder { // As long as we did not detect an invalid module, {processor_} will be set. // On failure, the pointer is transferred to {failed_processor_} and will only - // be used for a final callback once all bytes have arrived. + // be used for a final callback once all bytes have arrived. Finally, both + // {processor_} and {failed_processor_} will be null. std::unique_ptr processor_; - std::unique_ptr failed_processor_{nullptr}; + std::unique_ptr failed_processor_; std::unique_ptr state_; std::vector> section_buffers_; bool code_section_processed_ = false; uint32_t module_offset_ = 0; - bool stream_finished_ = false; // TODO(clemensb): Avoid holding the wire bytes live twice (here and in the // section buffers). 
@@ -256,8 +260,9 @@ size_t AsyncStreamingDecoder::DecodingState::ReadBytes( void AsyncStreamingDecoder::Finish(bool can_use_compiled_module) { TRACE_STREAMING("Finish\n"); - CHECK(!stream_finished_); - stream_finished_ = true; + // {Finish} cannot be called after {Finish}, {Abort}, {Fail}, or + // {NotifyCompilationDiscarded}. + CHECK_EQ(processor_ == nullptr, failed_processor_ != nullptr); if (ok() && deserializing()) { // Try to deserialize the module from wire bytes and module bytes. if (can_use_compiled_module && @@ -284,19 +289,22 @@ void AsyncStreamingDecoder::Finish(bool can_use_compiled_module) { base::OwnedVector bytes_copy = base::OwnedVector::Of(full_wire_bytes_); - if (ok()) { - processor_->OnFinishedStream(std::move(bytes_copy), false); - } else { - failed_processor_->OnFinishedStream(std::move(bytes_copy), true); - } + // Calling {OnFinishedStream} calls out to JS. Avoid further callbacks (by + // aborting the stream) by resetting the processor field before calling + // {OnFinishedStream}. + const bool failed = !ok(); + std::unique_ptr processor = + failed ? std::move(failed_processor_) : std::move(processor_); + processor->OnFinishedStream(std::move(bytes_copy), failed); } void AsyncStreamingDecoder::Abort() { TRACE_STREAMING("Abort\n"); - if (stream_finished_) return; - stream_finished_ = true; + // Ignore {Abort} after {Finish}. + if (!processor_ && !failed_processor_) return; Fail(); failed_processor_->OnAbort(); + failed_processor_.reset(); } namespace { diff --git a/src/wasm/streaming-decoder.h b/src/wasm/streaming-decoder.h index e5f4306031..fe552d4365 100644 --- a/src/wasm/streaming-decoder.h +++ b/src/wasm/streaming-decoder.h @@ -79,9 +79,9 @@ class V8_EXPORT_PRIVATE StreamingDecoder { virtual void Abort() = 0; - // Notify the StreamingDecoder that compilation ended and the + // Notify the StreamingDecoder that the job was discarded and the // StreamingProcessor should not be called anymore. 
- virtual void NotifyCompilationEnded() = 0; + virtual void NotifyCompilationDiscarded() = 0; // Caching support. // Sets the callback that is called after a new chunk of the module is tiered diff --git a/src/wasm/sync-streaming-decoder.cc b/src/wasm/sync-streaming-decoder.cc index ad0ecbdd7d..e447289ade 100644 --- a/src/wasm/sync-streaming-decoder.cc +++ b/src/wasm/sync-streaming-decoder.cc @@ -77,7 +77,7 @@ class V8_EXPORT_PRIVATE SyncStreamingDecoder : public StreamingDecoder { buffer_.clear(); } - void NotifyCompilationEnded() override { buffer_.clear(); } + void NotifyCompilationDiscarded() override { buffer_.clear(); } void NotifyNativeModuleCreated( const std::shared_ptr&) override { diff --git a/src/wasm/wasm-js.cc b/src/wasm/wasm-js.cc index 5c4d6ed8ac..7fdae33595 100644 --- a/src/wasm/wasm-js.cc +++ b/src/wasm/wasm-js.cc @@ -3181,6 +3181,19 @@ void WasmJs::InstallConditionalFeatures(Isolate* isolate, Handle context) { // This space left blank for future origin trials. } + +namespace wasm { +// static +std::unique_ptr StartStreamingForTesting( + Isolate* isolate, + std::shared_ptr resolver) { + return std::make_unique( + std::make_unique( + reinterpret_cast(isolate), "StartStreamingForTesting", + resolver)); +} +} // namespace wasm + #undef ASSIGN #undef EXTRACT_THIS diff --git a/src/wasm/wasm-js.h b/src/wasm/wasm-js.h index ce50b2822a..526f5fc932 100644 --- a/src/wasm/wasm-js.h +++ b/src/wasm/wasm-js.h @@ -9,16 +9,25 @@ #ifndef V8_WASM_WASM_JS_H_ #define V8_WASM_WASM_JS_H_ +#include + #include "src/common/globals.h" namespace v8 { -namespace internal { +class WasmStreaming; +} // namespace v8 + +namespace v8::internal { class Context; template class Handle; namespace wasm { +class CompilationResultResolver; class StreamingDecoder; + +V8_EXPORT_PRIVATE std::unique_ptr StartStreamingForTesting( + Isolate*, std::shared_ptr); } // namespace wasm // Exposes a WebAssembly API to JavaScript through the V8 API. 
@@ -31,7 +40,6 @@ class WasmJs { Isolate* isolate, Handle context); }; -} // namespace internal -} // namespace v8 +} // namespace v8::internal #endif // V8_WASM_WASM_JS_H_ diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status index 38d4a95433..8ee019a336 100644 --- a/test/cctest/cctest.status +++ b/test/cctest/cctest.status @@ -519,9 +519,7 @@ # TODO(v8:7777): Change this once wasm is supported in jitless mode. ['not has_webassembly or variant == jitless', { 'test-api/TurboAsmDisablesDetach': [SKIP], - 'test-api/WasmI32AtomicWaitCallback': [SKIP], - 'test-api/WasmI64AtomicWaitCallback': [SKIP], - 'test-api/WasmSetJitCodeEventHandler': [SKIP], + 'test-api/Wasm*': [SKIP], 'test-api-array-buffer/ArrayBuffer_NonDetachableWasDetached': [SKIP], 'test-backing-store/Run_WasmModule_Buffer_Externalized_Regression_UseAfterFree': [SKIP], 'test-c-wasm-entry/*': [SKIP], diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc index 49de9e3044..4dd9b07b9d 100644 --- a/test/cctest/test-api.cc +++ b/test/cctest/test-api.cc @@ -80,6 +80,7 @@ #include "test/common/flag-utils.h" #if V8_ENABLE_WEBASSEMBLY +#include "src/wasm/wasm-engine.h" #include "test/cctest/wasm/wasm-run-utils.h" #include "test/common/wasm/test-signatures.h" #include "test/common/wasm/wasm-macro-gen.h" @@ -22993,8 +22994,7 @@ void SourceURLHelper(v8::Isolate* isolate, const char* source_text, Local(), // source map URL false, // is opaque false, // is WASM - true // is ES Module - ); + true); // is ES Module v8::ScriptCompiler::Source source(source_str, origin, nullptr); Local module = @@ -29500,3 +29500,37 @@ UNINITIALIZED_TEST(OOMDetailsAreMovableAndCopyable) { UNINITIALIZED_TEST(JitCodeEventIsMovableAndCopyable) { TestCopyAndMoveConstructionAndAssignment(); } + +#if V8_ENABLE_WEBASSEMBLY +TEST(WasmAbortStreamingAfterContextDisposal) { + // This is a regression test for https://crbug.com/1403531. 
+ + class Resolver final : public i::wasm::CompilationResultResolver { + public: + void OnCompilationSucceeded( + i::Handle result) override { + UNREACHABLE(); + } + void OnCompilationFailed(i::Handle error_reason) override { + UNREACHABLE(); + } + }; + + auto resolver = std::make_shared(); + + std::unique_ptr wasm_streaming; + v8::Isolate* isolate = CcTest::isolate(); + i::Isolate* i_isolate = reinterpret_cast(isolate); + { + v8::HandleScope scope(isolate); + LocalContext context; + + wasm_streaming = + i::wasm::StartStreamingForTesting(i_isolate, std::move(resolver)); + isolate->ContextDisposedNotification(false); + } + + wasm_streaming->Abort({}); + wasm_streaming.reset(); +} +#endif // V8_ENABLE_WEBASSEMBLY From 8e84e825edf693030c889f328316a44633ff35db Mon Sep 17 00:00:00 2001 From: Darius M Date: Thu, 5 Jan 2023 11:53:47 +0100 Subject: [PATCH 164/654] [maglev] Adapt CheckJSTypedArrayBounds for Float64Array Fixed: chromium:1405150, v8:13638 Bug: v8:7700 Change-Id: I0b53d6bbd43ff7e068d8d82edfe2d956bb398223 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4136729 Reviewed-by: Victor Gomes Commit-Queue: Darius Mercadier Cr-Commit-Position: refs/heads/main@{#85115} --- src/maglev/arm64/maglev-ir-arm64.cc | 5 +++-- src/maglev/x64/maglev-ir-x64.cc | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index c5a73de36a..91dc962f8f 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -1417,8 +1417,9 @@ void CheckJSTypedArrayBounds::GenerateCode(MaglevAssembler* masm, JSTypedArray::kRawByteLengthOffset); int element_size = ElementsKindSize(elements_kind_); if (element_size > 1) { - DCHECK(element_size == 2 || element_size == 4); - __ Cmp(byte_length, Operand(index, LSL, element_size / 2)); + DCHECK(element_size == 2 || element_size == 4 || element_size == 8); + __ Cmp(byte_length, + Operand(index, LSL, 
base::bits::CountTrailingZeros(element_size))); } else { __ Cmp(byte_length, index); } diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index 6d51afb627..d8a23362c6 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -498,8 +498,8 @@ void CheckJSTypedArrayBounds::GenerateCode(MaglevAssembler* masm, JSTypedArray::kRawByteLengthOffset); int element_size = ElementsKindSize(elements_kind_); if (element_size > 1) { - DCHECK(element_size == 2 || element_size == 4); - __ shlq(index, Immediate(element_size / 2)); + DCHECK(element_size == 2 || element_size == 4 || element_size == 8); + __ shlq(index, Immediate(base::bits::CountTrailingZeros(element_size))); } __ cmpq(index, byte_length); // We use {above_equal} which does an unsigned comparison to handle negative From 912a05d7b0ea242a32340bb23aac93f003b6d7c8 Mon Sep 17 00:00:00 2001 From: Andreas Haas Date: Thu, 5 Jan 2023 09:26:57 +0100 Subject: [PATCH 165/654] [wasm][capi] Add missing Isolate::Scopes Missing Isolate::Scopes can cause the GC to fail. 
R=clemensb@chromium.org Bug: v8:12926 Change-Id: Iddfe73b4974d187261488189e55f0a6684ceb9ee Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4136720 Commit-Queue: Andreas Haas Reviewed-by: Clemens Backes Cr-Commit-Position: refs/heads/main@{#85116} --- src/wasm/c-api.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/wasm/c-api.cc b/src/wasm/c-api.cc index af8b6a555f..3636c2097c 100644 --- a/src/wasm/c-api.cc +++ b/src/wasm/c-api.cc @@ -889,6 +889,7 @@ class RefImpl { RefImpl* self = new (std::nothrow) RefImpl(); if (!self) return nullptr; i::Isolate* isolate = store->i_isolate(); + v8::Isolate::Scope isolate_scope(store->isolate()); self->val_ = isolate->global_handles()->Create(*obj); return make_own(seal(self)); } @@ -1215,6 +1216,7 @@ auto Module::deserialize(Store* store_abs, const vec& serialized) -> own { StoreImpl* store = impl(store_abs); i::Isolate* isolate = store->i_isolate(); + v8::Isolate::Scope isolate_scope(store->isolate()); i::HandleScope handle_scope(isolate); const byte_t* ptr = serialized.get(); uint64_t binary_size = ReadLebU64(&ptr); @@ -1725,6 +1727,7 @@ i::Address FuncData::v8_callback(i::Address host_data_foreign, i::Managed::cast(i::Object(host_data_foreign)).raw(); StoreImpl* store = impl(self->store); i::Isolate* isolate = store->i_isolate(); + v8::Isolate::Scope isolate_scope(store->isolate()); i::HandleScope scope(isolate); isolate->set_context(*v8::Utils::OpenHandle(*store->context())); @@ -1872,6 +1875,7 @@ auto Global::get() const -> Val { // TODO(7748): Handle types other than funcref and externref if needed. 
StoreImpl* store = impl(this)->store(); i::HandleScope scope(store->i_isolate()); + v8::Isolate::Scope isolate_scope(store->isolate()); i::Handle result = v8_global->GetRef(); if (result->IsWasmInternalFunction()) { result = From 167efb5974bd9f2bd20c5fd9911c51635a773486 Mon Sep 17 00:00:00 2001 From: Andreas Haas Date: Thu, 5 Jan 2023 14:25:59 +0100 Subject: [PATCH 166/654] [mjsunit] Avoid creating v8.prof file in regression test R=jgruber@chromium.org Bug: v8:12926 Change-Id: I565455068a385c708dce9406120de9ec3f893341 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4138257 Commit-Queue: Andreas Haas Reviewed-by: Jakob Linke Cr-Commit-Position: refs/heads/main@{#85117} --- test/mjsunit/regress/regress-1394663.js | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/mjsunit/regress/regress-1394663.js b/test/mjsunit/regress/regress-1394663.js index 95489d6f4f..51cf42e127 100644 --- a/test/mjsunit/regress/regress-1394663.js +++ b/test/mjsunit/regress/regress-1394663.js @@ -2,6 +2,9 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +// Adding a listener so that {profileEnd} does not create a file on the disk. +d8.profiler.setOnProfileEndListener(() =>{}); + console.profile(); console.profileEnd(); console.profileEnd(); From a0ba7818cd7f7c2f9d6b5ddb77f3c24dfd65ecdc Mon Sep 17 00:00:00 2001 From: Leszek Swirski Date: Thu, 5 Jan 2023 14:29:12 +0100 Subject: [PATCH 167/654] [string] Fix ConsStringIterator offset use ConsStringIterator::Next has an `offset` out parameter with non-obvious semantics -- namely, that the `offset` is the offset within the currently returned string matching the offset passed into the ConsStringIterator constructor. Notably, this will always be zero after the first iteration. Added a comment to explain this. 
This was being misused in string equality comparison, and in fact we can remove its use there entirely, as the only way to have a slice offset in string equality is to have a sliced string, which cannot point to a cons string. Change-Id: Idf9abc537220564ead0b056e9aff644d5c91426f Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4138255 Auto-Submit: Leszek Swirski Reviewed-by: Jakob Linke Commit-Queue: Leszek Swirski Cr-Commit-Position: refs/heads/main@{#85118} --- src/objects/string-comparator.cc | 5 ++++- src/objects/string-inl.h | 21 +++++++++++++-------- src/objects/string.h | 8 ++++++-- 3 files changed, 23 insertions(+), 11 deletions(-) diff --git a/src/objects/string-comparator.cc b/src/objects/string-comparator.cc index 3330f772ff..70d47b36f3 100644 --- a/src/objects/string-comparator.cc +++ b/src/objects/string-comparator.cc @@ -16,7 +16,10 @@ void StringComparator::State::Init( if (!cons_string.is_null()) { int offset; string = iter_.Next(&offset); - String::VisitFlat(this, string, offset, access_guard); + // We are resetting the iterator with zero offset, so we should never have + // a per-segment offset. + DCHECK_EQ(offset, 0); + String::VisitFlat(this, string, 0, access_guard); } } diff --git a/src/objects/string-inl.h b/src/objects/string-inl.h index 22db7ff0f2..6851f38856 100644 --- a/src/objects/string-inl.h +++ b/src/objects/string-inl.h @@ -570,9 +570,11 @@ bool String::IsEqualToImpl( case kConsStringTag | kTwoByteStringTag: { // The ConsString path is more complex and rare, so call out to an // out-of-line handler. - return IsConsStringEqualToImpl(ConsString::cast(string), - slice_offset, str, cage_base, - access_guard); + // Slices cannot refer to ConsStrings, so there cannot be a non-zero + // slice offset here. 
+ DCHECK_EQ(slice_offset, 0); + return IsConsStringEqualToImpl(ConsString::cast(string), str, + cage_base, access_guard); } case kThinStringTag | kOneByteStringTag: @@ -589,17 +591,20 @@ bool String::IsEqualToImpl( // static template bool String::IsConsStringEqualToImpl( - ConsString string, int slice_offset, base::Vector str, - PtrComprCageBase cage_base, + ConsString string, base::Vector str, PtrComprCageBase cage_base, const SharedStringAccessGuardIfNeeded& access_guard) { // Already checked the len in IsEqualToImpl. Check GE rather than EQ in case // this is a prefix check. DCHECK_GE(string.length(), str.size()); - ConsStringIterator iter(ConsString::cast(string), slice_offset); + ConsStringIterator iter(ConsString::cast(string)); base::Vector remaining_str = str; - for (String segment = iter.Next(&slice_offset); !segment.is_null(); - segment = iter.Next(&slice_offset)) { + int offset; + for (String segment = iter.Next(&offset); !segment.is_null(); + segment = iter.Next(&offset)) { + // We create the iterator without an offset, so we should never have a + // per-segment offset. + DCHECK_EQ(offset, 0); // Compare the individual segment against the appropriate subvector of the // remaining string. size_t len = std::min(segment.length(), remaining_str.size()); diff --git a/src/objects/string.h b/src/objects/string.h index 771a2a6567..a20e6039dd 100644 --- a/src/objects/string.h +++ b/src/objects/string.h @@ -627,7 +627,7 @@ class String : public TorqueGeneratedString { // Out-of-line IsEqualToImpl for ConsString. template V8_NOINLINE static bool IsConsStringEqualToImpl( - ConsString string, int slice_offset, base::Vector str, + ConsString string, base::Vector str, PtrComprCageBase cage_base, const SharedStringAccessGuardIfNeeded& access_guard); @@ -1090,7 +1090,11 @@ class ConsStringIterator { if (cons_string.is_null()) return; Initialize(cons_string, offset); } - // Returns nullptr when complete. + // Returns nullptr when complete. 
The offset_out parameter will be set to the + // offset within the returned segment that the user should start looking at, + // to match the offset passed into the constructor or Reset -- this will only + // be non-zero immediately after construction or Reset, and only if those had + // a non-zero offset. inline String Next(int* offset_out) { *offset_out = 0; if (depth_ == 0) return String(); From 1ef0a093e8000931dcccbcf3cd8afe4421609fa7 Mon Sep 17 00:00:00 2001 From: Manos Koukoutos Date: Thu, 5 Jan 2023 14:31:49 +0100 Subject: [PATCH 168/654] [wasm-gc] Apply isorecursive canonicalization to tag signatures We add a {canonical_type_index} field to tag objects and use it to check for canonical subtyping between tags when needed. Bug: v8:7748 Change-Id: I60723d8f72a9487af03f223c8f8a33ef8fa56461 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4135885 Commit-Queue: Manos Koukoutos Reviewed-by: Thibaud Michaud Cr-Commit-Position: refs/heads/main@{#85119} --- src/wasm/module-decoder-impl.h | 13 ++++++----- src/wasm/module-instantiate.cc | 9 ++++++-- src/wasm/wasm-js.cc | 9 +++++--- src/wasm/wasm-module.h | 4 +++- src/wasm/wasm-objects.cc | 22 +++++++++---------- src/wasm/wasm-objects.h | 5 ++++- src/wasm/wasm-objects.tq | 1 + test/cctest/wasm/wasm-run-utils.cc | 2 +- .../wasm/function-body-decoder-unittest.cc | 2 +- .../unittests/wasm/module-decoder-unittest.cc | 2 +- 10 files changed, 41 insertions(+), 28 deletions(-) diff --git a/src/wasm/module-decoder-impl.h b/src/wasm/module-decoder-impl.h index c2d8c36d9c..b7338bcb7d 100644 --- a/src/wasm/module-decoder-impl.h +++ b/src/wasm/module-decoder-impl.h @@ -839,8 +839,8 @@ class ModuleDecoderTemplate : public Decoder { module_->num_imported_tags++; const WasmTagSig* tag_sig = nullptr; consume_exception_attribute(); // Attribute ignored for now. 
- consume_tag_sig_index(module_.get(), &tag_sig); - module_->tags.emplace_back(tag_sig); + uint32_t sig_index = consume_tag_sig_index(module_.get(), &tag_sig); + module_->tags.emplace_back(tag_sig, sig_index); break; } default: @@ -1559,8 +1559,8 @@ class ModuleDecoderTemplate : public Decoder { tracer_.TagOffset(pc_offset()); const WasmTagSig* tag_sig = nullptr; consume_exception_attribute(); // Attribute ignored for now. - consume_tag_sig_index(module_.get(), &tag_sig); - module_->tags.emplace_back(tag_sig); + uint32_t sig_index = consume_tag_sig_index(module_.get(), &tag_sig); + module_->tags.emplace_back(tag_sig, sig_index); } } @@ -1771,8 +1771,9 @@ class ModuleDecoderTemplate : public Decoder { uint32_t sig_index = consume_u32v("signature index"); tracer_.Bytes(pos, static_cast(pc_ - pos)); if (!module->has_signature(sig_index)) { - errorf(pos, "signature index %u out of bounds (%d signatures)", sig_index, - static_cast(module->types.size())); + errorf(pos, "no signature at index %u (%d %s)", sig_index, + static_cast(module->types.size()), + enabled_features_.has_gc() ? 
"types" : "signatures"); *sig = nullptr; return 0; } diff --git a/src/wasm/module-instantiate.cc b/src/wasm/module-instantiate.cc index d5c66c073a..afccc77e4b 100644 --- a/src/wasm/module-instantiate.cc +++ b/src/wasm/module-instantiate.cc @@ -1692,7 +1692,9 @@ int InstanceBuilder::ProcessImports(Handle instance) { return -1; } Handle imported_tag = Handle::cast(value); - if (!imported_tag->MatchesSignature(module_->tags[import.index].sig)) { + if (!imported_tag->MatchesSignature( + module_->isorecursive_canonical_type_ids + [module_->tags[import.index].sig_index])) { ReportLinkError("imported tag does not match the expected type", index, module_name, import_name); return -1; @@ -1906,7 +1908,10 @@ void InstanceBuilder::ProcessExports(Handle instance) { Handle tag_object( HeapObject::cast(instance->tags_table().get(exp.index)), isolate_); - wrapper = WasmTagObject::New(isolate_, tag.sig, tag_object); + uint32_t canonical_sig_index = + module_->isorecursive_canonical_type_ids[tag.sig_index]; + wrapper = WasmTagObject::New(isolate_, tag.sig, canonical_sig_index, + tag_object); tags_wrappers_[exp.index] = wrapper; } desc.set_value(wrapper); diff --git a/src/wasm/wasm-js.cc b/src/wasm/wasm-js.cc index 7fdae33595..638535bce4 100644 --- a/src/wasm/wasm-js.cc +++ b/src/wasm/wasm-js.cc @@ -1669,8 +1669,12 @@ void WebAssemblyTag(const v8::FunctionCallbackInfo& args) { // Set the tag index to 0. It is only used for debugging purposes, and has no // meaningful value when declared outside of a wasm module. 
auto tag = i::WasmExceptionTag::New(i_isolate, 0); + + uint32_t canonical_type_index = + i::wasm::GetWasmEngine()->type_canonicalizer()->AddRecursiveGroup(&sig); + i::Handle tag_object = - i::WasmTagObject::New(i_isolate, &sig, tag); + i::WasmTagObject::New(i_isolate, &sig, canonical_type_index, tag); args.GetReturnValue().Set(Utils::ToLocal(tag_object)); } @@ -1709,8 +1713,7 @@ uint32_t GetEncodedSize(i::Handle tag_object) { i::wasm::WasmTagSig sig{0, static_cast(serialized_sig.length()), reinterpret_cast( serialized_sig.GetDataStartAddress())}; - i::wasm::WasmTag tag(&sig); - return i::WasmExceptionPackage::GetEncodedSize(&tag); + return i::WasmExceptionPackage::GetEncodedSize(&sig); } void EncodeExceptionValues(v8::Isolate* isolate, diff --git a/src/wasm/wasm-module.h b/src/wasm/wasm-module.h index a6a6094cf5..aa52f8306f 100644 --- a/src/wasm/wasm-module.h +++ b/src/wasm/wasm-module.h @@ -90,10 +90,12 @@ using WasmTagSig = FunctionSig; // Static representation of a wasm tag type. struct WasmTag { - explicit WasmTag(const WasmTagSig* sig) : sig(sig) {} + explicit WasmTag(const WasmTagSig* sig, uint32_t sig_index) + : sig(sig), sig_index(sig_index) {} const FunctionSig* ToFunctionSig() const { return sig; } const WasmTagSig* sig; // type signature of the tag. + uint32_t sig_index; }; // Static representation of a wasm literal stringref. 
diff --git a/src/wasm/wasm-objects.cc b/src/wasm/wasm-objects.cc index 4aa3603d3a..ae5685526f 100644 --- a/src/wasm/wasm-objects.cc +++ b/src/wasm/wasm-objects.cc @@ -1585,6 +1585,7 @@ void WasmArray::SetTaggedElement(uint32_t index, Handle value, // static Handle WasmTagObject::New(Isolate* isolate, const wasm::FunctionSig* sig, + uint32_t canonical_type_index, Handle tag) { Handle tag_cons(isolate->native_context()->wasm_tag_constructor(), isolate); @@ -1604,23 +1605,16 @@ Handle WasmTagObject::New(Isolate* isolate, isolate->factory()->NewJSObject(tag_cons, AllocationType::kOld); Handle tag_wrapper = Handle::cast(tag_object); tag_wrapper->set_serialized_signature(*serialized_sig); + tag_wrapper->set_canonical_type_index(canonical_type_index); tag_wrapper->set_tag(*tag); return tag_wrapper; } // TODO(7748): Integrate this with type canonicalization. -bool WasmTagObject::MatchesSignature(const wasm::FunctionSig* sig) { - DCHECK_EQ(0, sig->return_count()); - DCHECK_LE(sig->parameter_count(), std::numeric_limits::max()); - int sig_size = static_cast(sig->parameter_count()); - if (sig_size != serialized_signature().length()) return false; - for (int index = 0; index < sig_size; ++index) { - if (sig->GetParam(index) != serialized_signature().get(index)) { - return false; - } - } - return true; +bool WasmTagObject::MatchesSignature(uint32_t expected_canonical_type_index) { + return wasm::GetWasmEngine()->type_canonicalizer()->IsCanonicalSubtype( + this->canonical_type_index(), expected_canonical_type_index); } const wasm::FunctionSig* WasmCapiFunction::GetSignature(Zone* zone) const { @@ -1826,7 +1820,11 @@ size_t ComputeEncodedElementSize(wasm::ValueType type) { // static uint32_t WasmExceptionPackage::GetEncodedSize(const wasm::WasmTag* tag) { - const wasm::WasmTagSig* sig = tag->sig; + return GetEncodedSize(tag->sig); +} + +// static +uint32_t WasmExceptionPackage::GetEncodedSize(const wasm::WasmTagSig* sig) { uint32_t encoded_size = 0; for (size_t i = 0; i < 
sig->parameter_count(); ++i) { switch (sig->GetParam(i).kind()) { diff --git a/src/wasm/wasm-objects.h b/src/wasm/wasm-objects.h index ebf4c09421..0a884de122 100644 --- a/src/wasm/wasm-objects.h +++ b/src/wasm/wasm-objects.h @@ -37,6 +37,7 @@ struct WasmFunction; struct WasmGlobal; struct WasmModule; struct WasmTag; +using WasmTagSig = FunctionSig; class WasmValue; class WireBytesRef; } // namespace wasm @@ -551,10 +552,11 @@ class WasmTagObject public: // Checks whether the given {sig} has the same parameter types as the // serialized signature stored within this tag object. - bool MatchesSignature(const wasm::FunctionSig* sig); + bool MatchesSignature(uint32_t expected_canonical_type_index); static Handle New(Isolate* isolate, const wasm::FunctionSig* sig, + uint32_t canonical_type_index, Handle tag); TQ_OBJECT_CONSTRUCTORS(WasmTagObject) @@ -579,6 +581,7 @@ class V8_EXPORT_PRIVATE WasmExceptionPackage : public JSObject { Isolate* isolate, Handle exception_package); // Determines the size of the array holding all encoded exception values. 
+ static uint32_t GetEncodedSize(const wasm::WasmTagSig* tag); static uint32_t GetEncodedSize(const wasm::WasmTag* tag); DECL_CAST(WasmExceptionPackage) diff --git a/src/wasm/wasm-objects.tq b/src/wasm/wasm-objects.tq index ebbef918fc..8fae6b46ab 100644 --- a/src/wasm/wasm-objects.tq +++ b/src/wasm/wasm-objects.tq @@ -171,6 +171,7 @@ extern class WasmGlobalObject extends JSObject { extern class WasmTagObject extends JSObject { serialized_signature: PodArrayOfWasmValueType; tag: HeapObject; + canonical_type_index: Smi; } type WasmExportedFunction extends JSFunction; diff --git a/test/cctest/wasm/wasm-run-utils.cc b/test/cctest/wasm/wasm-run-utils.cc index 84398f4d57..3f977aea61 100644 --- a/test/cctest/wasm/wasm-run-utils.cc +++ b/test/cctest/wasm/wasm-run-utils.cc @@ -292,7 +292,7 @@ uint32_t TestingModuleBuilder::AddBytes(base::Vector bytes) { uint32_t TestingModuleBuilder::AddException(const FunctionSig* sig) { DCHECK_EQ(0, sig->return_count()); uint32_t index = static_cast(test_module_->tags.size()); - test_module_->tags.push_back(WasmTag{sig}); + test_module_->tags.emplace_back(sig, AddSignature(sig)); Handle tag = WasmExceptionTag::New(isolate_, index); Handle table(instance_object_->tags_table(), isolate_); table = isolate_->factory()->CopyFixedArrayAndGrow(table, 1); diff --git a/test/unittests/wasm/function-body-decoder-unittest.cc b/test/unittests/wasm/function-body-decoder-unittest.cc index 7c74c6a72e..d552d9cdee 100644 --- a/test/unittests/wasm/function-body-decoder-unittest.cc +++ b/test/unittests/wasm/function-body-decoder-unittest.cc @@ -107,7 +107,7 @@ class TestModuleBuilder { return result; } byte AddException(WasmTagSig* sig) { - mod.tags.emplace_back(sig); + mod.tags.emplace_back(sig, AddSignature(sig)); CHECK_LE(mod.types.size(), kMaxByteSizedLeb128); return static_cast(mod.tags.size() - 1); } diff --git a/test/unittests/wasm/module-decoder-unittest.cc b/test/unittests/wasm/module-decoder-unittest.cc index 37b7c405a1..5912d971ef 100644 --- 
a/test/unittests/wasm/module-decoder-unittest.cc +++ b/test/unittests/wasm/module-decoder-unittest.cc @@ -1165,7 +1165,7 @@ TEST_F(WasmModuleVerifyTest, Exception_invalid_sig_index) { SIG_INDEX(23)))}; // except[0] (sig#23 [out-of-bounds]) // Should fail decoding exception section. ModuleResult result = DecodeModule(base::ArrayVector(data)); - EXPECT_NOT_OK(result, "signature index 23 out of bounds"); + EXPECT_NOT_OK(result, "no signature at index 23 (1 signatures)"); } TEST_F(WasmModuleVerifyTest, Exception_invalid_sig_return) { From 4c4661350563c70a486c87f6bb797486bea26229 Mon Sep 17 00:00:00 2001 From: Andreas Haas Date: Mon, 2 Jan 2023 15:14:41 +0100 Subject: [PATCH 169/654] Reland "[wasm][capi] Optimize all functions before serialization" This CL is exactly the same as the original CL, without changes. The issue was a missing Isolate::Scope, and it existed already before this CL. I fixed the issue separately in https://crrev.com/c/4136720. Original message: Original change's description: > [wasm][capi] Optimize all functions before serialization > > The existing implementation of `serialize` in the C-API is to produce > a snapshot of the current state of the `NativeModule`. However, so > far all users of `serialize` did not care about the runtime of > `serialize`, but cared about `deserialize` starting up fast. > > With this CL all functions of a module get tiered up to TurboFan > before serializing the module. 
R=clemensb@chromium.org Change-Id: Ib8ed33c63c137e167fb50ccf721184b2b16cf4d4 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4131635 Commit-Queue: Andreas Haas Reviewed-by: Clemens Backes Cr-Commit-Position: refs/heads/main@{#85120} --- src/wasm/c-api.cc | 14 +++--- src/wasm/compilation-environment.h | 2 + src/wasm/function-compiler.cc | 4 +- src/wasm/function-compiler.h | 2 +- src/wasm/module-compiler.cc | 48 ++++++++++++++++++++- src/wasm/wasm-engine.cc | 5 ++- src/wasm/wasm-engine.h | 2 +- test/cctest/wasm/test-wasm-shared-engine.cc | 2 +- 8 files changed, 65 insertions(+), 14 deletions(-) diff --git a/src/wasm/c-api.cc b/src/wasm/c-api.cc index 3636c2097c..67f1f1d80a 100644 --- a/src/wasm/c-api.cc +++ b/src/wasm/c-api.cc @@ -1180,13 +1180,13 @@ auto Module::exports() const -> ownvec { return ExportsImpl(impl(this)->v8_object()); } -// We serialize the state of the module when calling this method; an arbitrary -// number of functions can be tiered up to TurboFan, and only those will be -// serialized. -// The caller is responsible for "warming up" the module before serializing. +// We tier up all functions to TurboFan, and then serialize all TurboFan code. +// If no TurboFan code existed before calling this function, then the call to +// {serialize} may take a long time. auto Module::serialize() const -> vec { i::wasm::NativeModule* native_module = impl(this)->v8_object()->native_module(); + native_module->compilation_state()->TierUpAllFunctions(); v8::base::Vector wire_bytes = native_module->wire_bytes(); size_t binary_size = wire_bytes.size(); i::wasm::WasmSerializer serializer(native_module); @@ -1201,8 +1201,10 @@ auto Module::serialize() const -> vec { ptr += binary_size; if (!serializer.SerializeNativeModule( {reinterpret_cast(ptr), serial_size})) { - // Serialization failed, because no TurboFan code is present yet. In this - // case, the serialized module just contains the wire bytes. 
+ // Serialization fails if no TurboFan code is present. This may happen + // because the module does not have any functions, or because another thread + // modifies the {NativeModule} concurrently. In this case, the serialized + // module just contains the wire bytes. buffer = vec::make_uninitialized(size_size + binary_size); byte_t* ptr = buffer.get(); i::wasm::LEBHelper::write_u64v(reinterpret_cast(&ptr), diff --git a/src/wasm/compilation-environment.h b/src/wasm/compilation-environment.h index 60908bdae4..0e41ee1953 100644 --- a/src/wasm/compilation-environment.h +++ b/src/wasm/compilation-environment.h @@ -172,6 +172,8 @@ class V8_EXPORT_PRIVATE CompilationState { // Set a higher priority for the compilation job. void SetHighPriority(); + void TierUpAllFunctions(); + bool failed() const; bool baseline_compilation_finished() const; diff --git a/src/wasm/function-compiler.cc b/src/wasm/function-compiler.cc index 20828eb0dc..42039d75e2 100644 --- a/src/wasm/function-compiler.cc +++ b/src/wasm/function-compiler.cc @@ -158,7 +158,7 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation( } // static -void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate, +void WasmCompilationUnit::CompileWasmFunction(Counters* counters, NativeModule* native_module, WasmFeatures* detected, const WasmFunction* function, @@ -174,7 +174,7 @@ void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate, CompilationEnv env = native_module->CreateCompilationEnv(); WasmCompilationResult result = unit.ExecuteCompilation( &env, native_module->compilation_state()->GetWireBytesStorage().get(), - isolate->counters(), nullptr, detected); + counters, nullptr, detected); if (result.succeeded()) { WasmCodeRefScope code_ref_scope; native_module->PublishCode( diff --git a/src/wasm/function-compiler.h b/src/wasm/function-compiler.h index 5798982ad4..a10927ecf2 100644 --- a/src/wasm/function-compiler.h +++ b/src/wasm/function-compiler.h @@ -76,7 +76,7 @@ class 
V8_EXPORT_PRIVATE WasmCompilationUnit final { ForDebugging for_debugging() const { return for_debugging_; } int func_index() const { return func_index_; } - static void CompileWasmFunction(Isolate*, NativeModule*, + static void CompileWasmFunction(Counters*, NativeModule*, WasmFeatures* detected, const WasmFunction*, ExecutionTier); diff --git a/src/wasm/module-compiler.cc b/src/wasm/module-compiler.cc index d817ff931e..023e1321a7 100644 --- a/src/wasm/module-compiler.cc +++ b/src/wasm/module-compiler.cc @@ -631,6 +631,8 @@ class CompilationStateImpl { compile_job_->UpdatePriority(TaskPriority::kUserBlocking); } + void TierUpAllFunctions(); + bool failed() const { return compile_failed_.load(std::memory_order_relaxed); } @@ -836,6 +838,10 @@ void CompilationState::AddCallback( void CompilationState::SetHighPriority() { Impl(this)->SetHighPriority(); } +void CompilationState::TierUpAllFunctions() { + Impl(this)->TierUpAllFunctions(); +} + void CompilationState::InitializeAfterDeserialization( base::Vector lazy_functions, base::Vector eager_functions) { @@ -1410,7 +1416,8 @@ void TierUpNowForTesting(Isolate* isolate, WasmInstanceObject instance, TransitiveTypeFeedbackProcessor::Process(instance, func_index); } auto* native_module = instance.module_object().native_module(); - wasm::GetWasmEngine()->CompileFunction(isolate, native_module, func_index, + wasm::GetWasmEngine()->CompileFunction(isolate->counters(), native_module, + func_index, wasm::ExecutionTier::kTurbofan); CHECK(!native_module->compilation_state()->failed()); } @@ -3690,6 +3697,45 @@ void CompilationStateImpl::WaitForCompilationEvent( semaphore->Wait(); } +void CompilationStateImpl::TierUpAllFunctions() { + const WasmModule* module = native_module_->module(); + uint32_t num_wasm_functions = module->num_declared_functions; + WasmCodeRefScope code_ref_scope; + CompilationUnitBuilder builder(native_module_); + for (uint32_t i = 0; i < num_wasm_functions; ++i) { + int func_index = 
module->num_imported_functions + i; + WasmCode* code = native_module_->GetCode(func_index); + if (!code || !code->is_turbofan()) { + builder.AddTopTierUnit(func_index, ExecutionTier::kTurbofan); + } + } + builder.Commit(); + + // Join the compilation, until no compilation units are left anymore. + class DummyDelegate final : public JobDelegate { + bool ShouldYield() override { return false; } + bool IsJoiningThread() const override { return true; } + void NotifyConcurrencyIncrease() override { UNIMPLEMENTED(); } + uint8_t GetTaskId() override { return kMainTaskId; } + }; + + DummyDelegate delegate; + ExecuteCompilationUnits(native_module_weak_, async_counters_.get(), &delegate, + kBaselineOrTopTier); + + // We cannot wait for other compilation threads to finish, so we explicitly + // compile all functions which are not yet available as TurboFan code. + for (uint32_t i = 0; i < num_wasm_functions; ++i) { + uint32_t func_index = module->num_imported_functions + i; + WasmCode* code = native_module_->GetCode(func_index); + if (!code || !code->is_turbofan()) { + wasm::GetWasmEngine()->CompileFunction(async_counters_.get(), + native_module_, func_index, + wasm::ExecutionTier::kTurbofan); + } + } +} + namespace { using JSToWasmWrapperQueue = WrapperQueue>; diff --git a/src/wasm/wasm-engine.cc b/src/wasm/wasm-engine.cc index 1d230ca1f7..6bfecc3e24 100644 --- a/src/wasm/wasm-engine.cc +++ b/src/wasm/wasm-engine.cc @@ -705,12 +705,13 @@ std::shared_ptr WasmEngine::StartStreamingCompilation( isolate, enabled, context, api_method_name, std::move(resolver)); } -void WasmEngine::CompileFunction(Isolate* isolate, NativeModule* native_module, +void WasmEngine::CompileFunction(Counters* counters, + NativeModule* native_module, uint32_t function_index, ExecutionTier tier) { // Note we assume that "one-off" compilations can discard detected features. 
WasmFeatures detected = WasmFeatures::None(); WasmCompilationUnit::CompileWasmFunction( - isolate, native_module, &detected, + counters, native_module, &detected, &native_module->module()->functions[function_index], tier); } diff --git a/src/wasm/wasm-engine.h b/src/wasm/wasm-engine.h index 709443d09b..29a0fea4f3 100644 --- a/src/wasm/wasm-engine.h +++ b/src/wasm/wasm-engine.h @@ -202,7 +202,7 @@ class V8_EXPORT_PRIVATE WasmEngine { // Compiles the function with the given index at a specific compilation tier. // Errors are stored internally in the CompilationState. // This is mostly used for testing to force a function into a specific tier. - void CompileFunction(Isolate* isolate, NativeModule* native_module, + void CompileFunction(Counters* counters, NativeModule* native_module, uint32_t function_index, ExecutionTier tier); void EnterDebuggingForIsolate(Isolate* isolate); diff --git a/test/cctest/wasm/test-wasm-shared-engine.cc b/test/cctest/wasm/test-wasm-shared-engine.cc index 0c76e6d21f..2591cf92d1 100644 --- a/test/cctest/wasm/test-wasm-shared-engine.cc +++ b/test/cctest/wasm/test-wasm-shared-engine.cc @@ -301,7 +301,7 @@ TEST(SharedEngineRunThreadedTierUp) { Handle instance = isolate->ImportInstance(module); WasmFeatures detected = WasmFeatures::None(); WasmCompilationUnit::CompileWasmFunction( - isolate->isolate(), module.get(), &detected, + isolate->isolate()->counters(), module.get(), &detected, &module->module()->functions[0], ExecutionTier::kTurbofan); CHECK_EQ(23, isolate->Run(instance)); }); From 5c613b9887273936b8778e93720ea41458eb1c2a Mon Sep 17 00:00:00 2001 From: Victor Gomes Date: Wed, 4 Jan 2023 16:25:28 +0100 Subject: [PATCH 170/654] [maglev][arm64] Fix push/pop register list order Maglev assumes a fixed register order (from low to high) when iterating the frame, since it identifies tagged values using a bitmap. 
Bug: v8:7700 Change-Id: I2231b111b30068eeff408e8ceea896cb17e4b864 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4135892 Commit-Queue: Leszek Swirski Auto-Submit: Victor Gomes Reviewed-by: Leszek Swirski Cr-Commit-Position: refs/heads/main@{#85121} --- src/codegen/arm64/macro-assembler-arm64.cc | 42 ++++++++++++++++++++++ src/codegen/arm64/macro-assembler-arm64.h | 19 +++------- 2 files changed, 47 insertions(+), 14 deletions(-) diff --git a/src/codegen/arm64/macro-assembler-arm64.cc b/src/codegen/arm64/macro-assembler-arm64.cc index 57a658a83a..bd8b6561ea 100644 --- a/src/codegen/arm64/macro-assembler-arm64.cc +++ b/src/codegen/arm64/macro-assembler-arm64.cc @@ -97,6 +97,48 @@ void TurboAssembler::PopCPURegList(CPURegList registers) { } } +void MacroAssembler::PushAll(RegList reglist) { + if (reglist.Count() % 2 != 0) { + DCHECK(!reglist.has(xzr)); + reglist.set(xzr); + } + + CPURegList registers(kXRegSizeInBits, reglist); + int size = registers.RegisterSizeInBytes(); + DCHECK_EQ(0, (size * registers.Count()) % 16); + + // If LR was stored here, we would need to sign it if + // V8_ENABLE_CONTROL_FLOW_INTEGRITY is on. + DCHECK(!registers.IncludesAliasOf(lr)); + + while (!registers.IsEmpty()) { + const CPURegister& src0 = registers.PopLowestIndex(); + const CPURegister& src1 = registers.PopLowestIndex(); + stp(src1, src0, MemOperand(sp, -2 * size, PreIndex)); + } +} + +void MacroAssembler::PopAll(RegList reglist) { + if (reglist.Count() % 2 != 0) { + DCHECK(!reglist.has(xzr)); + reglist.set(xzr); + } + + CPURegList registers(kXRegSizeInBits, reglist); + int size = registers.RegisterSizeInBytes(); + DCHECK_EQ(0, (size * registers.Count()) % 16); + + // If LR was loaded here, we would need to authenticate it if + // V8_ENABLE_CONTROL_FLOW_INTEGRITY is on. 
+ DCHECK(!registers.IncludesAliasOf(lr)); + + while (!registers.IsEmpty()) { + const CPURegister& dst0 = registers.PopHighestIndex(); + const CPURegister& dst1 = registers.PopHighestIndex(); + ldp(dst0, dst1, MemOperand(sp, 2 * size, PostIndex)); + } +} + int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) const { auto list = kCallerSaved; diff --git a/src/codegen/arm64/macro-assembler-arm64.h b/src/codegen/arm64/macro-assembler-arm64.h index 89be4d2f49..e5f87cb025 100644 --- a/src/codegen/arm64/macro-assembler-arm64.h +++ b/src/codegen/arm64/macro-assembler-arm64.h @@ -1813,20 +1813,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { PopSizeRegList(regs, kSRegSizeInBits); } - inline void PushAll(RegList registers) { - if (registers.Count() % 2 != 0) { - DCHECK(!registers.has(xzr)); - registers.set(xzr); - } - PushXRegList(registers); - } - inline void PopAll(RegList registers) { - if (registers.Count() % 2 != 0) { - DCHECK(!registers.has(xzr)); - registers.set(xzr); - } - PopXRegList(registers); - } + // These PushAll/PopAll respect the order of the registers in the stack from + // low index to high. + void PushAll(RegList registers); + void PopAll(RegList registers); + inline void PushAll(DoubleRegList registers, int stack_slot_size = kDoubleSize) { if (registers.Count() % 2 != 0) { From 362e792ee4ef98eb0f8dba6f2619cfd67c36df76 Mon Sep 17 00:00:00 2001 From: Choongwoo Han Date: Tue, 3 Jan 2023 14:55:47 -0800 Subject: [PATCH 171/654] [wasm] Do not build loop exits for non-innermost loops Loops can be unrolled only for innermost loops. But, the wasm graph builder builds loop exits regardless of the condition. This CL detects if the loop can be innermost using AnalyzeLoopAssignment, and do not allocate unnecessary nodes if it can't be. This reduces memory usage for the reported wasm binary from 1.3GB to 300MB. 
Bug: v8:13543 Change-Id: I693800071f7eee4a9991e094830f23d27a96b13f Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4134466 Reviewed-by: Manos Koukoutos Commit-Queue: Choongwoo Han Cr-Commit-Position: refs/heads/main@{#85122} --- src/wasm/function-body-decoder-impl.h | 6 ++- src/wasm/function-body-decoder.cc | 5 ++- src/wasm/function-body-decoder.h | 3 +- src/wasm/graph-builder-interface.cc | 44 +++++++++---------- .../wasm/loop-assignment-analysis-unittest.cc | 29 +++++++++++- 5 files changed, 58 insertions(+), 29 deletions(-) diff --git a/src/wasm/function-body-decoder-impl.h b/src/wasm/function-body-decoder-impl.h index 9e2ee57ab3..c8b35739a2 100644 --- a/src/wasm/function-body-decoder-impl.h +++ b/src/wasm/function-body-decoder-impl.h @@ -1422,7 +1422,8 @@ class WasmDecoder : public Decoder { // position at the end of the vector represents possible assignments to // the instance cache. static BitVector* AnalyzeLoopAssignment(WasmDecoder* decoder, const byte* pc, - uint32_t locals_count, Zone* zone) { + uint32_t locals_count, Zone* zone, + bool* loop_is_innermost = nullptr) { if (pc >= decoder->end()) return nullptr; if (*pc != kExprLoop) return nullptr; // The number of locals_count is augmented by 1 so that the 'locals_count' @@ -1430,11 +1431,14 @@ class WasmDecoder : public Decoder { BitVector* assigned = zone->New(locals_count + 1, zone); int depth = -1; // We will increment the depth to 0 when we decode the // starting 'loop' opcode. + if (loop_is_innermost) *loop_is_innermost = true; // Iteratively process all AST nodes nested inside the loop. 
while (pc < decoder->end() && VALIDATE(decoder->ok())) { WasmOpcode opcode = static_cast(*pc); switch (opcode) { case kExprLoop: + if (loop_is_innermost && depth >= 0) *loop_is_innermost = false; + V8_FALLTHROUGH; case kExprIf: case kExprBlock: case kExprTry: diff --git a/src/wasm/function-body-decoder.cc b/src/wasm/function-body-decoder.cc index 6e571ae7db..2a0233c58a 100644 --- a/src/wasm/function-body-decoder.cc +++ b/src/wasm/function-body-decoder.cc @@ -325,12 +325,13 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body, } BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, uint32_t num_locals, - const byte* start, const byte* end) { + const byte* start, const byte* end, + bool* loop_is_innermost) { WasmFeatures no_features = WasmFeatures::None(); WasmDecoder decoder( zone, nullptr, no_features, &no_features, nullptr, start, end, 0); return WasmDecoder::AnalyzeLoopAssignment( - &decoder, start, num_locals, zone); + &decoder, start, num_locals, zone, loop_is_innermost); } } // namespace wasm diff --git a/src/wasm/function-body-decoder.h b/src/wasm/function-body-decoder.h index 7337b77fdc..34d941819d 100644 --- a/src/wasm/function-body-decoder.h +++ b/src/wasm/function-body-decoder.h @@ -81,7 +81,8 @@ V8_EXPORT_PRIVATE bool ValidateAndDecodeLocalDeclsForTesting( const byte* start, const byte* end, Zone* zone); V8_EXPORT_PRIVATE BitVector* AnalyzeLoopAssignmentForTesting( - Zone* zone, uint32_t num_locals, const byte* start, const byte* end); + Zone* zone, uint32_t num_locals, const byte* start, const byte* end, + bool* loop_is_innermost); // Computes the length of the opcode at the given address. 
V8_EXPORT_PRIVATE unsigned OpcodeLength(const byte* pc, const byte* end); diff --git a/src/wasm/graph-builder-interface.cc b/src/wasm/graph-builder-interface.cc index c6b05ad035..7575237920 100644 --- a/src/wasm/graph-builder-interface.cc +++ b/src/wasm/graph-builder-interface.cc @@ -145,9 +145,9 @@ class WasmGraphBuildingInterface { SsaEnv* block_env = nullptr; // environment that dies with this block. TryInfo* try_info = nullptr; // information about try statements. int32_t previous_catch = -1; // previous Control with a catch. + bool loop_innermost = false; // whether this loop can be innermost. BitVector* loop_assignments = nullptr; // locals assigned in this loop. TFNode* loop_node = nullptr; // loop header of this loop. - DISALLOW_IMPLICIT_CONSTRUCTORS(Control); template explicit Control(Args&&... args) V8_NOEXCEPT @@ -159,6 +159,7 @@ class WasmGraphBuildingInterface { block_env(other.block_env), try_info(other.try_info), previous_catch(other.previous_catch), + loop_innermost(other.loop_innermost), loop_assignments(other.loop_assignments), loop_node(other.loop_node) { // The `control_` vector in WasmFullDecoder calls destructor of this when @@ -173,6 +174,7 @@ class WasmGraphBuildingInterface { if (block_env) block_env->Kill(); if (try_info) try_info->catch_env->Kill(); } + DISALLOW_IMPLICIT_CONSTRUCTORS(Control); }; WasmGraphBuildingInterface(compiler::WasmGraphBuilder* builder, @@ -292,25 +294,6 @@ class WasmGraphBuildingInterface { TFNode* loop_node = builder_->Loop(control()); - if (emit_loop_exits()) { - uint32_t nesting_depth = 0; - for (uint32_t depth = 1; depth < decoder->control_depth(); depth++) { - if (decoder->control_at(depth)->is_loop()) { - nesting_depth++; - } - } - // If this loop is nested, the parent loop's can_be_innermost field needs - // to be false. If the last loop in loop_infos_ has less depth, it has to - // be the parent loop. 
If it does not, it means another loop has been - // found within the parent loop, and that loop will have set the parent's - // can_be_innermost to false, so we do not need to do anything. - if (nesting_depth > 0 && - loop_infos_.back().nesting_depth < nesting_depth) { - loop_infos_.back().can_be_innermost = false; - } - loop_infos_.emplace_back(loop_node, nesting_depth, true); - } - builder_->SetControl(loop_node); decoder->control_at(0)->loop_node = loop_node; @@ -319,8 +302,10 @@ class WasmGraphBuildingInterface { builder_->TerminateLoop(effect(), control()); // Doing a preprocessing pass to analyze loop assignments seems to pay off // compared to reallocating Nodes when rearranging Phis in Goto. + bool can_be_innermost = false; BitVector* assigned = WasmDecoder::AnalyzeLoopAssignment( - decoder, decoder->pc(), decoder->num_locals(), decoder->zone()); + decoder, decoder->pc(), decoder->num_locals(), decoder->zone(), + &can_be_innermost); if (decoder->failed()) return; int instance_cache_index = decoder->num_locals(); // If the module has shared memory, the stack guard might reallocate the @@ -331,6 +316,19 @@ class WasmGraphBuildingInterface { DCHECK_NOT_NULL(assigned); decoder->control_at(0)->loop_assignments = assigned; + if (emit_loop_exits()) { + uint32_t nesting_depth = 0; + for (uint32_t depth = 1; depth < decoder->control_depth(); depth++) { + if (decoder->control_at(depth)->is_loop()) { + nesting_depth++; + } + } + loop_infos_.emplace_back(loop_node, nesting_depth, can_be_innermost); + // Only innermost loops can be unrolled. We can avoid allocating + // unnecessary nodes if this loop can not be innermost. + decoder->control_at(0)->loop_innermost = can_be_innermost; + } + // Only introduce phis for variables assigned in this loop. 
for (int i = decoder->num_locals() - 1; i >= 0; i--) { if (!assigned->Contains(i)) continue; @@ -414,7 +412,7 @@ class WasmGraphBuildingInterface { // However, if loop unrolling is enabled, we must create a loop exit and // wrap the fallthru values on the stack. if (block->is_loop()) { - if (emit_loop_exits() && block->reachable()) { + if (emit_loop_exits() && block->reachable() && block->loop_innermost) { BuildLoopExits(decoder, block); WrapLocalsAtLoopExit(decoder, block); uint32_t arity = block->end_merge.arity; @@ -2304,7 +2302,7 @@ class WasmGraphBuildingInterface { break; } } - if (control != nullptr) { + if (control != nullptr && control->loop_innermost) { BuildLoopExits(decoder, control); for (Value& value : stack_values) { if (value.node != nullptr) { diff --git a/test/unittests/wasm/loop-assignment-analysis-unittest.cc b/test/unittests/wasm/loop-assignment-analysis-unittest.cc index 65945932ae..2e7c75c1c9 100644 --- a/test/unittests/wasm/loop-assignment-analysis-unittest.cc +++ b/test/unittests/wasm/loop-assignment-analysis-unittest.cc @@ -26,8 +26,10 @@ class WasmLoopAssignmentAnalyzerTest : public TestWithZone { TestSignatures sigs; uint32_t num_locals; - BitVector* Analyze(const byte* start, const byte* end) { - return AnalyzeLoopAssignmentForTesting(zone(), num_locals, start, end); + BitVector* Analyze(const byte* start, const byte* end, + bool* loop_is_innermost = nullptr) { + return AnalyzeLoopAssignmentForTesting(zone(), num_locals, start, end, + loop_is_innermost); } }; @@ -175,6 +177,29 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, Loop2) { } } +TEST_F(WasmLoopAssignmentAnalyzerTest, NestedLoop) { + num_locals = 5; + byte code[] = {WASM_LOOP(WASM_LOOP(WASM_LOCAL_SET(0, 1)))}; + + bool outer_is_innermost = false; + BitVector* outer_assigned = + Analyze(code, code + arraysize(code), &outer_is_innermost); + for (int j = 0; j < outer_assigned->length(); j++) { + bool expected = j == 0; + EXPECT_EQ(expected, outer_assigned->Contains(j)); + } + 
EXPECT_FALSE(outer_is_innermost); + + bool inner_is_innermost = false; + BitVector* inner_assigned = + Analyze(code + 2, code + arraysize(code), &inner_is_innermost); + for (int j = 0; j < inner_assigned->length(); j++) { + bool expected = j == 0; + EXPECT_EQ(expected, inner_assigned->Contains(j)); + } + EXPECT_TRUE(inner_is_innermost); +} + TEST_F(WasmLoopAssignmentAnalyzerTest, Malformed) { byte code[] = {kExprLoop, kVoidCode, kExprF32Neg, kExprBrTable, 0x0E, 'h', 'e', 'l', 'l', 'o', ',', ' ', From 4d716b2ffb838cbaed300db02a594d142697b660 Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Thu, 5 Jan 2023 19:10:49 -0800 Subject: [PATCH 172/654] Update V8 DEPS (trusted) Rolling v8/build: https://chromium.googlesource.com/chromium/src/build/+log/33bb56b..c2ac4bf Rolling v8/third_party/catapult: https://chromium.googlesource.com/catapult/+log/038b25e..e5bf2c4 Rolling v8/third_party/depot_tools: https://chromium.googlesource.com/chromium/tools/depot_tools/+log/58a343c..50985d5 Rolling v8/third_party/fuchsia-sdk/sdk: version:11.20230104.1.1..version:11.20230105.2.1 Rolling v8/tools/clang: https://chromium.googlesource.com/chromium/src/tools/clang/+log/3b54a13..5c711ec Change-Id: I6538f8982f85f23fb540217d52aee55f142895f8 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4141158 Commit-Queue: v8-ci-autoroll-builder Bot-Commit: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#85123} --- DEPS | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/DEPS b/DEPS index eb10fbc34b..5488b9edfa 100644 --- a/DEPS +++ b/DEPS @@ -65,7 +65,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. 
- 'fuchsia_version': 'version:11.20230104.1.1', + 'fuchsia_version': 'version:11.20230105.2.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -105,7 +105,7 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '68e6038b5350cba18c341cc7c572170af5c5b20c', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '33bb56b5658abbf77eb54898fb66ee5df3450723', + Var('chromium_url') + '/chromium/src/build.git' + '@' + 'c2ac4bfe4b2229bed7890e8b25f5a6f65d8d7693', 'buildtools': Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'cf8d11e41138ca777f3eaa09df41bf968c8be6ba', 'buildtools/clang_format/script': @@ -209,7 +209,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + '038b25e395cafe0dc95b8139295eab6441315cc8', + 'url': Var('chromium_url') + '/catapult.git' + '@' + 'e5bf2c49f5ae06d3be53808f31f3bc8f67824a8c', 'condition': 'checkout_android', }, 'third_party/colorama/src': { @@ -217,7 +217,7 @@ deps = { 'condition': 'checkout_android', }, 'third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '58a343c88bda7ef0b65ad9bdf208b9307446dfbe', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '50985d5fddc0db97da6bfa457379fdabeac00195', 'third_party/fuchsia-sdk/sdk': { 'packages': [ { @@ -272,7 +272,7 @@ deps = { 'third_party/zlib': Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'fa5dc4750029333c2486efa38eec7d13890108ed', 'tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '3b54a131a94f1345579c9d92b08c2b45c43cfe77', + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '5c711ec7ead844108b414b5ddb366db6a622f895', 'tools/luci-go': { 'packages': [ { From 3c79a842837c32dfd0aa1a77b0c719b4d2bd4c98 Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Fri, 6 
Jan 2023 19:11:29 -0800 Subject: [PATCH 173/654] Update V8 DEPS (trusted) Rolling v8/build: https://chromium.googlesource.com/chromium/src/build/+log/c2ac4bf..dac6050 Rolling v8/third_party/catapult: https://chromium.googlesource.com/catapult/+log/e5bf2c4..163b421 Rolling v8/third_party/depot_tools: https://chromium.googlesource.com/chromium/tools/depot_tools/+log/50985d5..6f90547 Rolling v8/third_party/fuchsia-sdk/sdk: version:11.20230105.2.1..version:11.20230106.2.1 Rolling v8/tools/clang: https://chromium.googlesource.com/chromium/src/tools/clang/+log/5c711ec..e8c31f9 Change-Id: I96f80a73c7886aa6beba8250309f417deb648dc8 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4143946 Commit-Queue: v8-ci-autoroll-builder Bot-Commit: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#85124} --- DEPS | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/DEPS b/DEPS index 5488b9edfa..ddd0a16cf1 100644 --- a/DEPS +++ b/DEPS @@ -65,7 +65,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. 
- 'fuchsia_version': 'version:11.20230105.2.1', + 'fuchsia_version': 'version:11.20230106.2.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -105,7 +105,7 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '68e6038b5350cba18c341cc7c572170af5c5b20c', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + 'c2ac4bfe4b2229bed7890e8b25f5a6f65d8d7693', + Var('chromium_url') + '/chromium/src/build.git' + '@' + 'dac6050ddba0f21f3a5c1dc13d43f78ba03b047f', 'buildtools': Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'cf8d11e41138ca777f3eaa09df41bf968c8be6ba', 'buildtools/clang_format/script': @@ -209,7 +209,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + 'e5bf2c49f5ae06d3be53808f31f3bc8f67824a8c', + 'url': Var('chromium_url') + '/catapult.git' + '@' + '163b42131719ee189bcf1398d35e43f43fd2b0b5', 'condition': 'checkout_android', }, 'third_party/colorama/src': { @@ -217,7 +217,7 @@ deps = { 'condition': 'checkout_android', }, 'third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '50985d5fddc0db97da6bfa457379fdabeac00195', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '6f905470df9197880d645cfade6310b1cf1fb238', 'third_party/fuchsia-sdk/sdk': { 'packages': [ { @@ -272,7 +272,7 @@ deps = { 'third_party/zlib': Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'fa5dc4750029333c2486efa38eec7d13890108ed', 'tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '5c711ec7ead844108b414b5ddb366db6a622f895', + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'e8c31f937f2872dc18d4bd9a74f3a8d6a4c78ade', 'tools/luci-go': { 'packages': [ { From d26949217580819901944b3baa4f073543414681 Mon Sep 17 00:00:00 2001 From: Frank Tang Date: Tue, 27 Dec 2022 
17:53:53 -0800 Subject: [PATCH 174/654] [test262] Roll test262 https://chromium.googlesource.com/external/github.com/tc39/test262/+log/e6c6460a5b9..f00d4118d Bug: v8:7834 Change-Id: I02cecbc0d74ee2904d3d5d9d9f94f182a88b0cf6 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4126701 Reviewed-by: Shu-yu Guo Commit-Queue: Frank Tang Cr-Commit-Position: refs/heads/main@{#85125} --- DEPS | 2 +- test/test262/test262.status | 20 +++++++++++++++----- 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/DEPS b/DEPS index ddd0a16cf1..55d68bd58e 100644 --- a/DEPS +++ b/DEPS @@ -161,7 +161,7 @@ deps = { 'test/mozilla/data': Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be', 'test/test262/data': - Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'e6c6460a5b94e32e01ce9a9d236f3148d4648ce5', + Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'f00d4118dba5d266d1611ba2cd4e995d3e4b523a', 'third_party/android_ndk': { 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '8388a2be5421311dc75c5f937aae13d821a27f3d', 'condition': 'checkout_android', diff --git a/test/test262/test262.status b/test/test262/test262.status index ec65e9b554..e33742636f 100644 --- a/test/test262/test262.status +++ b/test/test262/test262.status @@ -221,7 +221,6 @@ 'intl402/DateTimeFormat/prototype/formatRangeToParts/temporal-objects-resolved-time-zone': [FAIL], 'intl402/DateTimeFormat/prototype/format/temporal-objects-resolved-time-zone': [FAIL], 'intl402/DateTimeFormat/prototype/formatToParts/temporal-objects-resolved-time-zone': [FAIL], - 'staging/Intl402/Temporal/old/time-toLocaleString': [FAIL], # https://bugs.chromium.org/p/v8/issues/detail?id=7831 'language/statements/generators/generator-created-after-decl-inst': [FAIL], @@ -310,6 +309,13 @@ 'built-ins/RegExp/prototype/Symbol.replace/get-flags-err': [FAIL], 'built-ins/RegExp/prototype/Symbol.replace/get-unicode-error': [FAIL], 
+ # https://bugs.chromium.org/p/v8/issues/detail?id=13321 + 'built-ins/Array/fromAsync/builtin': [FAIL], + 'built-ins/Array/fromAsync/length': [FAIL], + 'built-ins/Array/fromAsync/name': [FAIL], + 'built-ins/Array/fromAsync/not-a-constructor': [FAIL], + 'built-ins/Array/fromAsync/prop-desc': [FAIL], + # https://bugs.chromium.org/p/v8/issues/detail?id=11544 'built-ins/Temporal/Duration/prototype/total/balance-negative-result': [FAIL], 'intl402/Temporal/Calendar/prototype/dateFromFields/infinity-throws-rangeerror': [FAIL], @@ -982,6 +988,14 @@ 'intl402/Temporal/Calendar/prototype/eraYear/argument-string-date-with-utc-offset': [FAIL], 'intl402/Temporal/Calendar/prototype/yearOfWeek/infinity-throws-rangeerror': [FAIL], + 'built-ins/Temporal/Calendar/prototype/yearOfWeek/not-a-constructor': [FAIL], + 'built-ins/Temporal/Duration/prototype/add/relativeto-zoneddatetime-nanoseconds-to-days-range-errors': [FAIL], + 'built-ins/Temporal/Duration/prototype/round/relativeto-zoneddatetime-nanoseconds-to-days-range-errors': [FAIL], + 'built-ins/Temporal/Duration/prototype/subtract/relativeto-zoneddatetime-nanoseconds-to-days-range-errors': [FAIL], + 'built-ins/Temporal/ZonedDateTime/prototype/since/nanoseconds-to-days-range-errors': [FAIL], + 'built-ins/Temporal/ZonedDateTime/prototype/until/nanoseconds-to-days-range-errors': [FAIL], + + # https://bugs.chromium.org/p/v8/issues/detail?id=13584 'intl402/Array/prototype/toLocaleString/invoke-element-tolocalestring': [FAIL], @@ -998,10 +1012,6 @@ # https://github.com/tc39/proposal-intl-duration-format/issues/114 'intl402/DurationFormat/prototype/format/style-options-en': [FAIL], - # https://github.com/tc39/test262/pull/3752 - 'intl402/DurationFormat/prototype/format/invalid-arguments-throws': [FAIL], - 'intl402/DurationFormat/prototype/formatToParts/invalid-arguments-throws': [FAIL], - # https://bugs.chromium.org/p/v8/issues/detail?id=12763 
'language/expressions/class/decorator/syntax/class-valid/decorator-member-expr-private-identifier': [FAIL], 'language/expressions/class/decorator/syntax/valid/decorator-call-expr-identifier-reference': [FAIL], From 922fa2f9ee8e594a1d1aaa47522d4a4a55e8306f Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Sat, 7 Jan 2023 19:26:24 -0800 Subject: [PATCH 175/654] Update V8 DEPS (trusted) Rolling v8/build: https://chromium.googlesource.com/chromium/src/build/+log/dac6050..705c30a Rolling v8/third_party/fuchsia-sdk/sdk: version:11.20230106.2.1..version:11.20230107.0.1 Change-Id: Ib856262f50acce14f20a07b0c1227b73ff749e3f Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4143948 Bot-Commit: v8-ci-autoroll-builder Commit-Queue: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#85126} --- DEPS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/DEPS b/DEPS index 55d68bd58e..c0bd4e2e95 100644 --- a/DEPS +++ b/DEPS @@ -65,7 +65,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. 
- 'fuchsia_version': 'version:11.20230106.2.1', + 'fuchsia_version': 'version:11.20230107.0.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -105,7 +105,7 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '68e6038b5350cba18c341cc7c572170af5c5b20c', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + 'dac6050ddba0f21f3a5c1dc13d43f78ba03b047f', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '705c30a79a4ae545b6d5b757915040bc82bef0cd', 'buildtools': Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'cf8d11e41138ca777f3eaa09df41bf968c8be6ba', 'buildtools/clang_format/script': From 31ccfed4612593af31df105adfd26724c1bb1e0b Mon Sep 17 00:00:00 2001 From: Clemens Backes Date: Thu, 5 Jan 2023 17:37:44 +0100 Subject: [PATCH 176/654] [x64] Add support for "cold calls" in hot paths MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This makes (specially annotated) calls to "cold functions" in hot paths more efficient by hiding the fact that we are actually calling a function here. Clang would otherwise unconditionally spill and reload registers that might be clobbered by the call. This would slow down the fast path. This CL allows to reverse priorities here: The fast path can stay fast (no spills and loads), but the slow path gets even slower. The inline assembly that implements the cold call spills and reloads *all* registers, because we do not know which registers are in use in the scope where the cold call is being emitted. I.e. this behaves like a custom calling convention with no caller-saved registers. The `preserve_all` attribute (experimental in clang, and incomplete for C++) would also solve this, but it is not production-ready yet (leads to crashes of clang and crashes of the generated code). 
R=leszeks@chromium.org CC=​dlehmann@chromium.org Bug: v8:13565, v8:13570 Change-Id: I2b54a480da1c689113a67c601c29d73239b0ff2b Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4116584 Commit-Queue: Clemens Backes Reviewed-by: Anton Bikineev Reviewed-by: Leszek Swirski Cr-Commit-Position: refs/heads/main@{#85127} --- BUILD.bazel | 2 + BUILD.gn | 2 + src/base/call_cold.cc | 58 +++++++++++++++ src/base/call_cold.h | 103 ++++++++++++++++++++++++++ src/codegen/x64/assembler-x64.h | 6 +- src/wasm/function-body-decoder-impl.h | 5 +- 6 files changed, 174 insertions(+), 2 deletions(-) create mode 100644 src/base/call_cold.cc create mode 100644 src/base/call_cold.h diff --git a/BUILD.bazel b/BUILD.bazel index d6792860e6..6227092b18 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -595,6 +595,8 @@ filegroup( "src/base/bounded-page-allocator.h", "src/base/bounds.h", "src/base/build_config.h", + "src/base/call_cold.cc", + "src/base/call_cold.h", "src/base/compiler-specific.h", "src/base/container-utils.h", "src/base/cpu.cc", diff --git a/BUILD.gn b/BUILD.gn index e7b5853d0c..cd3ce11ba4 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -5491,6 +5491,8 @@ v8_component("v8_libbase") { "src/base/bounded-page-allocator.h", "src/base/bounds.h", "src/base/build_config.h", + "src/base/call_cold.cc", + "src/base/call_cold.h", "src/base/compiler-specific.h", "src/base/container-utils.h", "src/base/cpu.cc", diff --git a/src/base/call_cold.cc b/src/base/call_cold.cc new file mode 100644 index 0000000000..148ed3d50d --- /dev/null +++ b/src/base/call_cold.cc @@ -0,0 +1,58 @@ +// Copyright 2023 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/base/call_cold.h" + +namespace v8::base { + +#if V8_HOST_ARCH_X64 && (defined(__clang__) || defined(__GNUC__)) +asm(".globl v8_base_call_cold\n" + "v8_base_call_cold:\n" + " push %rbp\n" + " mov %rsp, %rbp\n" + // Push all non-clobbered registers, except for callee-saved ones. The + // compiler does not even know that it is executing a call, so we can not + // clobber any register, not the registers holding the function address or + // the arguments. + " push %rax\n" + " push %rcx\n" + " push %rdx\n" +#ifndef V8_OS_WIN + // %rsi and %rdi are callee-saved on Windows. + " push %rsi\n" + " push %rdi\n" +#endif // !V8_OS_WIN + " push %r8\n" + " push %r9\n" + " push %r10\n" + " push %r11\n" + // Save %rsp to %r15 (after pushing it) and align %rsp to 16 bytes. + // %r15 is callee-saved, so the value will still be there after the call. + " push %r15\n" + " mov %rsp, %r15\n" + " and $-16, %rsp\n" + // Now execute the actual call. + " call *%rax\n" + // Restore the potentially unaligned %rsp. + " mov %r15, %rsp\n" + // Pop the previously pushed registers. We have no return value, so we do + // not need to preserve %rax. + " pop %r15\n" + " pop %r11\n" + " pop %r10\n" + " pop %r9\n" + " pop %r8\n" +#ifndef V8_OS_WIN + " pop %rdi\n" + " pop %rsi\n" +#endif // !V8_OS_WIN + " pop %rdx\n" + " pop %rcx\n" + " pop %rax\n" + // Leave the frame and return. + " pop %rbp\n" + " ret"); +#endif + +} // namespace v8::base diff --git a/src/base/call_cold.h b/src/base/call_cold.h new file mode 100644 index 0000000000..1fa7d9fff4 --- /dev/null +++ b/src/base/call_cold.h @@ -0,0 +1,103 @@ +// Copyright 2023 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASE_CALL_COLD_H_ +#define V8_BASE_CALL_COLD_H_ + +#include + +#include "include/v8config.h" + +namespace v8::base { + +// Use {call_cold} for calls in hot paths that are unlikely to be executed. 
The +// compiler will not know that this executes a call, so it will not clobber any +// registers (i.e. this behaves like a custom calling convention where all +// registers are callee-save). +// Executing the call will be significantly slower than without going through +// {call_cold}, as all registers will have to be spilled and an indirect call is +// being executed. + +// As a start, we added support for GCC and clang on x64. Other platforms can +// be added later, as needed. + +template +constexpr bool IsValidForCallCold = + // The callable object must be convertible to a function pointer (e.g. a + // capture-less lambda). + std::is_convertible_v && + // All parameters must be integral (support for floating-point arguments is + // not implemented). + (... && (std::is_integral_v || std::is_pointer_v)); + +// Do not use V8_CC_GNU, as this is not defined for clang on Windows. Explicitly +// check for GCC or clang. +#if V8_HOST_ARCH_X64 && (defined(__clang__) || defined(__GNUC__)) + +// Define the parameter registers. Windows uses a different calling convention +// than other OSes. +#define REG_FN "rax" +#ifdef V8_OS_WIN +#define REG_P1 "rcx" +#define REG_P2 "rdx" +#define REG_P3 "r8" +#else +#define REG_P1 "rdi" +#define REG_P2 "rsi" +#define REG_P3 "rdx" +#endif +// We clobber all xmm registers so we do not have to spill and reload them. +#define CLOBBER \ + "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", \ + "xmm7", "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", \ + "xmm15", "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm6", "st", \ + "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)" +#define V8_CALL_COLD_ASM \ + "sub $128, %%rsp\n" /* Bump %rsp by 128, beyond the red zone. */ \ + "call v8_base_call_cold\n" /* Call our trampoline. */ \ + "add $128, %%rsp" /* Restore previous %rsp. */ + +// 1 Parameter, no result.
+template +V8_INLINE void call_cold(const Fn& fn, P1 p1) { + static_assert(IsValidForCallCold); + using FnPtr = void (*)(P1); + register FnPtr fn_reg asm(REG_FN) = fn; + register P1 p1_reg asm(REG_P1) = p1; + asm(V8_CALL_COLD_ASM : : "r"(fn_reg), "r"(p1_reg) : CLOBBER); +} + +// 3 Parameters, no result. +template +V8_INLINE void call_cold(const Fn& fn, P1 p1, P2 p2, P3 p3) { + static_assert(IsValidForCallCold); + using FnPtr = void (*)(P1, P2, P3); + register FnPtr fn_reg asm(REG_FN) = fn; + register P1 p1_reg asm(REG_P1) = p1; + register P2 p2_reg asm(REG_P2) = p2; + register P3 p3_reg asm(REG_P3) = p3; + asm(V8_CALL_COLD_ASM + : + : "r"(fn_reg), "r"(p1_reg), "r"(p2_reg), "r"(p3_reg) + : CLOBBER); +} + +#else +// Architectures without special support just execute the call directly. +template +V8_INLINE void call_cold(const Fn& fn, Ps... ps) { + static_assert(IsValidForCallCold); + fn(ps...); +} +#endif + +#undef REG_P1 +#undef REG_P2 +#undef REG_P3 +#undef CLOBBER +#undef V8_CALL_COLD_ASM + +} // namespace v8::base + +#endif // V8_BASE_CALL_COLD_H_ diff --git a/src/codegen/x64/assembler-x64.h b/src/codegen/x64/assembler-x64.h index afc5798482..60b6e35e67 100644 --- a/src/codegen/x64/assembler-x64.h +++ b/src/codegen/x64/assembler-x64.h @@ -42,6 +42,7 @@ #include #include +#include "src/base/call_cold.h" #include "src/base/export-template.h" #include "src/codegen/assembler.h" #include "src/codegen/cpu-features.h" @@ -2640,7 +2641,10 @@ void Assembler::vinstr(byte op, YMMRegister dst, XMMRegister src1, class EnsureSpace { public: explicit V8_INLINE EnsureSpace(Assembler* assembler) : assembler_(assembler) { - if (V8_UNLIKELY(assembler_->buffer_overflow())) assembler_->GrowBuffer(); + if (V8_UNLIKELY(assembler_->buffer_overflow())) { + base::call_cold([](Assembler* assembler) { assembler->GrowBuffer(); }, + assembler_); + } #ifdef DEBUG space_before_ = assembler_->available_space(); #endif diff --git a/src/wasm/function-body-decoder-impl.h 
b/src/wasm/function-body-decoder-impl.h index c8b35739a2..61477a9cfc 100644 --- a/src/wasm/function-body-decoder-impl.h +++ b/src/wasm/function-body-decoder-impl.h @@ -16,6 +16,7 @@ #include +#include "src/base/call_cold.h" #include "src/base/small-vector.h" #include "src/base/strings.h" #include "src/base/v8-fallthrough.h" @@ -1256,7 +1257,9 @@ class FastZoneVector { V8_INLINE void EnsureMoreCapacity(int slots_needed, Zone* zone) { if (V8_LIKELY(capacity_end_ - end_ >= slots_needed)) return; - Grow(slots_needed, zone); + base::call_cold([](FastZoneVector* vec, int slots_needed, + Zone* zone) { vec->Grow(slots_needed, zone); }, + this, slots_needed, zone); } private: From aa5f2e5c43359d731a485b5af668a90886e6ae75 Mon Sep 17 00:00:00 2001 From: Clemens Backes Date: Sun, 8 Jan 2023 21:01:14 +0000 Subject: [PATCH 177/654] Revert "[x64] Add support for "cold calls" in hot paths" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 31ccfed4612593af31df105adfd26724c1bb1e0b. Reason for revert: Fails compilation on: https://ci.chromium.org/ui/p/v8/builders/ci/V8%20Linux64%20-%20cfi%20-%20builder/6527/overview Original change's description: > [x64] Add support for "cold calls" in hot paths > > This makes (specially annotated) calls to "cold functions" in hot paths > more efficient by hiding the fact that we are actually calling a > function here. Clang would otherwise unconditionally spill and reload > registers that might be clobbered by the call. This would slow down the > fast path. > > This CL allows to reverse priorities here: The fast path can stay fast > (no spills and loads), but the slow path gets even slower. The inline > assembly that implements the cold call spills and reloads *all* > registers, because we do not know which registers are in use in the > scope where the cold call is being emitted. > > I.e. this behaves like a custom calling convention with no caller-saved > registers. 
> > The `preserve_all` attribute (experimental in clang, and incomplete for > C++) would also solve this, but it is not production-ready yet (leads to > crashes of clang and crashes of the generated code). > > R=​leszeks@chromium.org > CC=​​dlehmann@chromium.org > > Bug: v8:13565, v8:13570 > Change-Id: I2b54a480da1c689113a67c601c29d73239b0ff2b > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4116584 > Commit-Queue: Clemens Backes > Reviewed-by: Anton Bikineev > Reviewed-by: Leszek Swirski > Cr-Commit-Position: refs/heads/main@{#85127} Bug: v8:13565, v8:13570 Change-Id: I2f5b3343eb372fea13d2c4ab6354f2bc52e2c338 No-Presubmit: true No-Tree-Checks: true No-Try: true Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4145819 Bot-Commit: Rubber Stamper Auto-Submit: Clemens Backes Commit-Queue: Rubber Stamper Cr-Commit-Position: refs/heads/main@{#85128} --- BUILD.bazel | 2 - BUILD.gn | 2 - src/base/call_cold.cc | 58 --------------- src/base/call_cold.h | 103 -------------------------- src/codegen/x64/assembler-x64.h | 6 +- src/wasm/function-body-decoder-impl.h | 5 +- 6 files changed, 2 insertions(+), 174 deletions(-) delete mode 100644 src/base/call_cold.cc delete mode 100644 src/base/call_cold.h diff --git a/BUILD.bazel b/BUILD.bazel index 6227092b18..d6792860e6 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -595,8 +595,6 @@ filegroup( "src/base/bounded-page-allocator.h", "src/base/bounds.h", "src/base/build_config.h", - "src/base/call_cold.cc", - "src/base/call_cold.h", "src/base/compiler-specific.h", "src/base/container-utils.h", "src/base/cpu.cc", diff --git a/BUILD.gn b/BUILD.gn index cd3ce11ba4..e7b5853d0c 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -5491,8 +5491,6 @@ v8_component("v8_libbase") { "src/base/bounded-page-allocator.h", "src/base/bounds.h", "src/base/build_config.h", - "src/base/call_cold.cc", - "src/base/call_cold.h", "src/base/compiler-specific.h", "src/base/container-utils.h", "src/base/cpu.cc", diff --git 
a/src/base/call_cold.cc b/src/base/call_cold.cc deleted file mode 100644 index 148ed3d50d..0000000000 --- a/src/base/call_cold.cc +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2023 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/base/call_cold.h" - -namespace v8::base { - -#if V8_HOST_ARCH_X64 && (defined(__clang__) || defined(__GNUC__)) -asm(".globl v8_base_call_cold\n" - "v8_base_call_cold:\n" - " push %rbp\n" - " mov %rsp, %rbp\n" - // Push all non-clobbered registers, except for callee-saved ones. The - // compiler does not even know that it is executing a call, so we can not - // clobber any register, not the registers holding the function address or - // the arguments. - " push %rax\n" - " push %rcx\n" - " push %rdx\n" -#ifndef V8_OS_WIN - // %rsi and %rdi are callee-saved on Windows. - " push %rsi\n" - " push %rdi\n" -#endif // !V8_OS_WIN - " push %r8\n" - " push %r9\n" - " push %r10\n" - " push %r11\n" - // Save %rsp to %r15 (after pushing it) and align %rsp to 16 bytes. - // %r15 is callee-saved, so the value will still be there after the call. - " push %r15\n" - " mov %rsp, %r15\n" - " and $-16, %rsp\n" - // Now execute the actual call. - " call *%rax\n" - // Restore the potentially unaligned %rsp. - " mov %r15, %rsp\n" - // Pop the previously pushed registers. We have no return value, so we do - // not need to preserve %rax. - " pop %r15\n" - " pop %r11\n" - " pop %r10\n" - " pop %r9\n" - " pop %r8\n" -#ifndef V8_OS_WIN - " pop %rdi\n" - " pop %rsi\n" -#endif // !V8_OS_WIN - " pop %rdx\n" - " pop %rcx\n" - " pop %rax\n" - // Leave the frame and return. - " pop %rbp\n" - " ret"); -#endif - -} // namespace v8::base diff --git a/src/base/call_cold.h b/src/base/call_cold.h deleted file mode 100644 index 1fa7d9fff4..0000000000 --- a/src/base/call_cold.h +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2023 the V8 project authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_BASE_CALL_COLD_H_ -#define V8_BASE_CALL_COLD_H_ - -#include - -#include "include/v8config.h" - -namespace v8::base { - -// Use {call_cold} for calls in hot paths that are unlikely to be executed. The -// compiler will not know that this executes a call, so it will not clobber any -// registers (i.e. this behaves like a custom calling convention where all -// registers are callee-save). -// Executing the call will be significantly slower than without going through -// {call_cold}, as all registers will have to be spilled and an indirect call is -// being executed. - -// As a start, we added support for GCC and clang on x64. Other platforms can -// be added later, as needed. - -template -constexpr bool IsValidForCallCold = - // The callable object must be convertible to a function pointer (e.g. a - // capture-less lambda). - std::is_convertible_v && - // All parameters must be integral (support for floating-point arguments is - // not implemented). - (... && (std::is_integral_v || std::is_pointer_v)); - -// Do not use V8_CC_GNU, as this is not defined for clang on Windows. Explicitly -// check for GCC or clang. -#if V8_HOST_ARCH_X64 && (defined(__clang__) || defined(__GNUC__)) - -// Define the parameter registers. Windows uses a different calling convention -// than other OSes. -#define REG_FN "rax" -#ifdef V8_OS_WIN -#define REG_P1 "rcx" -#define REG_P2 "rdx" -#define REG_P3 "r8" -#else -#define REG_P1 "rdi" -#define REG_P2 "rsi" -#define REG_P3 "rdx" -#endif -// We clobber all xmm registers so we do not have to spill and reload them.
-#define CLOBBER \ - "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", \ - "xmm7", "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", \ - "xmm15", "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm6", "st", \ - "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)" -#define V8_CALL_COLD_ASM \ - "sub $128, %%rsp\n" /* Bump %rsp by 128, beyond the red zone. */ \ - "call v8_base_call_cold\n" /* Call our trampoline. */ \ - "add $128, %%rsp" /* Restore previous %rsp. */ - -// 1 Parameter, no result. -template -V8_INLINE void call_cold(const Fn& fn, P1 p1) { - static_assert(IsValidForCallCold); - using FnPtr = void (*)(P1); - register FnPtr fn_reg asm(REG_FN) = fn; - register P1 p1_reg asm(REG_P1) = p1; - asm(V8_CALL_COLD_ASM : : "r"(fn_reg), "r"(p1_reg) : CLOBBER); -} - -// 3 Parameters, no result. -template -V8_INLINE void call_cold(const Fn& fn, P1 p1, P2 p2, P3 p3) { - static_assert(IsValidForCallCold); - using FnPtr = void (*)(P1, P2, P3); - register FnPtr fn_reg asm(REG_FN) = fn; - register P1 p1_reg asm(REG_P1) = p1; - register P2 p2_reg asm(REG_P2) = p2; - register P3 p3_reg asm(REG_P3) = p3; - asm(V8_CALL_COLD_ASM - : - : "r"(fn_reg), "r"(p1_reg), "r"(p2_reg), "r"(p3_reg) - : CLOBBER); -} - -#else -// Architectures without special support just execute the call directly. -template -V8_INLINE void call_cold(const Fn& fn, Ps... 
ps) { - static_assert(IsValidForCallCold); - fn(ps...); -} -#endif - -#undef REG_P1 -#undef REG_P2 -#undef REG_P3 -#undef CLOBBER -#undef V8_CALL_COLD_ASM - -} // namespace v8::base - -#endif // V8_BASE_CALL_COLD_H_ diff --git a/src/codegen/x64/assembler-x64.h b/src/codegen/x64/assembler-x64.h index 60b6e35e67..afc5798482 100644 --- a/src/codegen/x64/assembler-x64.h +++ b/src/codegen/x64/assembler-x64.h @@ -42,7 +42,6 @@ #include #include -#include "src/base/call_cold.h" #include "src/base/export-template.h" #include "src/codegen/assembler.h" #include "src/codegen/cpu-features.h" @@ -2641,10 +2640,7 @@ void Assembler::vinstr(byte op, YMMRegister dst, XMMRegister src1, class EnsureSpace { public: explicit V8_INLINE EnsureSpace(Assembler* assembler) : assembler_(assembler) { - if (V8_UNLIKELY(assembler_->buffer_overflow())) { - base::call_cold([](Assembler* assembler) { assembler->GrowBuffer(); }, - assembler_); - } + if (V8_UNLIKELY(assembler_->buffer_overflow())) assembler_->GrowBuffer(); #ifdef DEBUG space_before_ = assembler_->available_space(); #endif diff --git a/src/wasm/function-body-decoder-impl.h b/src/wasm/function-body-decoder-impl.h index 61477a9cfc..c8b35739a2 100644 --- a/src/wasm/function-body-decoder-impl.h +++ b/src/wasm/function-body-decoder-impl.h @@ -16,7 +16,6 @@ #include -#include "src/base/call_cold.h" #include "src/base/small-vector.h" #include "src/base/strings.h" #include "src/base/v8-fallthrough.h" @@ -1257,9 +1256,7 @@ class FastZoneVector { V8_INLINE void EnsureMoreCapacity(int slots_needed, Zone* zone) { if (V8_LIKELY(capacity_end_ - end_ >= slots_needed)) return; - base::call_cold([](FastZoneVector* vec, int slots_needed, - Zone* zone) { vec->Grow(slots_needed, zone); }, - this, slots_needed, zone); + Grow(slots_needed, zone); } private: From e6902daebfce639b4e48388f5cec2dcd478fa7e0 Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Sun, 8 Jan 2023 19:27:09 -0800 Subject: [PATCH 178/654] Update V8 DEPS (trusted) Rolling 
v8/build: https://chromium.googlesource.com/chromium/src/build/+log/705c30a..7ab406c Rolling v8/third_party/fuchsia-sdk/sdk: version:11.20230107.0.1..version:11.20230108.3.1 Change-Id: Id52793459a5ae65df95837c5f66ef36c5fafe3f5 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4143951 Commit-Queue: v8-ci-autoroll-builder Bot-Commit: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#85129} --- DEPS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/DEPS b/DEPS index c0bd4e2e95..c786b8c5d9 100644 --- a/DEPS +++ b/DEPS @@ -65,7 +65,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. - 'fuchsia_version': 'version:11.20230107.0.1', + 'fuchsia_version': 'version:11.20230108.3.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -105,7 +105,7 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '68e6038b5350cba18c341cc7c572170af5c5b20c', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '705c30a79a4ae545b6d5b757915040bc82bef0cd', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '7ab406c5da3e95de16989ec4f81c95e5b656e1c3', 'buildtools': Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'cf8d11e41138ca777f3eaa09df41bf968c8be6ba', 'buildtools/clang_format/script': From ebd933037eb61bb3626675bdf2de800ba9f2518d Mon Sep 17 00:00:00 2001 From: Jakob Linke Date: Wed, 4 Jan 2023 14:25:03 +0100 Subject: [PATCH 179/654] [flags,testrunner] Consider readonly flags for conflict detection Flag conflict detection 1) bails out on incompatible flag values (e.g. --jitless and --turbofan) and 2) handles such bailouts transparently in the test runner by marking affected tests as OUTCOMES_FAIL. 
This CL adds full support for readonly flags to this system, together with required additional annotations in variants.py. Drive-by: assert proper use of v8_enable_slow_dchecks, and add support when dcheck_always_on is set. Drive-by: introduce has_maglev build variable detection based on v8_enable_maglev and use that for .status file annotations. Drive-by: protect against unintended overwrites of build variables in statusfile.py. Cq-Include-Trybots: luci.v8.try:v8_linux64_fyi_rel Bug: v8:13629,v8:10577 Change-Id: I04de399139a0490806df8bfee7e75e2ec767b4b5 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4135879 Reviewed-by: Tobias Tebbi Reviewed-by: Victor Gomes Commit-Queue: Jakob Linke Cr-Commit-Position: refs/heads/main@{#85130} --- BUILD.gn | 42 +++++---- bazel/defs.bzl | 8 ++ src/flags/flag-definitions.h | 30 ++++--- src/flags/flags.cc | 86 ++++++++++++++++--- src/flags/flags.h | 1 + src/runtime/runtime-test.cc | 3 +- test/mjsunit/mjsunit.status | 12 ++- tools/testrunner/base_runner.py | 21 +++++ tools/testrunner/build_config.py | 55 +++++++----- tools/testrunner/local/statusfile.py | 13 ++- tools/testrunner/local/variants.py | 83 ++++++++++++++---- tools/testrunner/standard_runner_test.py | 22 +++-- .../testroot1/out/build/v8_build_config.json | 10 ++- .../testroot2/out/build/v8_build_config.json | 10 ++- .../testroot3/out/build/v8_build_config.json | 10 ++- .../out.gn/build/v8_build_config.json | 10 ++- .../testroot6/out/build/v8_build_config.json | 10 ++- 17 files changed, 324 insertions(+), 102 deletions(-) diff --git a/BUILD.gn b/BUILD.gn index e7b5853d0c..c02d018caa 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -516,6 +516,10 @@ assert(!v8_enable_trace_ignition || v8_enable_trace_unoptimized, "Ignition tracing requires unoptimized tracing to be enabled.") assert(!v8_enable_trace_baseline_exec || v8_enable_trace_unoptimized, "Baseline tracing requires unoptimized tracing to be enabled.") +assert( + v8_enable_debugging_features == true || 
dcheck_always_on || + !v8_enable_slow_dchecks, + "v8_enable_slow_dchecks requires v8_enable_debugging_features or dcheck_always_on.") if (v8_enable_short_builtin_calls && (!v8_enable_pointer_compression && v8_current_cpu != "x64")) { @@ -1294,13 +1298,12 @@ config("toolchain") { if ((is_linux || is_chromeos) && v8_enable_backtrace) { ldflags += [ "-rdynamic" ] } - + } + if (v8_enable_debugging_features || dcheck_always_on) { defines += [ "DEBUG" ] if (v8_enable_slow_dchecks) { defines += [ "ENABLE_SLOW_DCHECKS" ] } - } else if (dcheck_always_on) { - defines += [ "DEBUG" ] } if (v8_enable_verify_csa) { @@ -2309,6 +2312,7 @@ action("v8_dump_build_config") { script = "tools/testrunner/utils/dump_build_config.py" outputs = [ "$root_out_dir/v8_build_config.json" ] is_gcov_coverage = v8_code_coverage && !is_clang + is_DEBUG_defined = v8_enable_debugging_features || dcheck_always_on is_full_debug = v8_enable_debugging_features && !v8_optimized_debug args = [ rebase_path("$root_out_dir/v8_build_config.json", root_build_dir), @@ -2320,38 +2324,44 @@ action("v8_dump_build_config") { "is_clang=$is_clang", "is_component_build=$is_component_build", "is_debug=$v8_enable_debugging_features", + "is_DEBUG_defined=$is_DEBUG_defined", "is_full_debug=$is_full_debug", "is_gcov_coverage=$is_gcov_coverage", "is_msan=$is_msan", "is_tsan=$is_tsan", "is_ubsan_vptr=$is_ubsan_vptr", "target_cpu=\"$target_cpu\"", + "v8_code_comments=$v8_code_comments", + "v8_control_flow_integrity=$v8_control_flow_integrity", "v8_current_cpu=\"$v8_current_cpu\"", + "v8_dict_property_const_tracking=$v8_dict_property_const_tracking", + "v8_disable_write_barriers=$v8_disable_write_barriers", "v8_enable_atomic_object_field_writes=" + "$v8_enable_atomic_object_field_writes", + "v8_enable_cet_shadow_stack=$v8_enable_cet_shadow_stack", + "v8_enable_concurrent_marking=$v8_enable_concurrent_marking", "v8_enable_conservative_stack_scanning=" + "$v8_enable_conservative_stack_scanning", - 
"v8_enable_concurrent_marking=$v8_enable_concurrent_marking", - "v8_enable_single_generation=$v8_enable_single_generation", + "v8_enable_debug_code=$v8_enable_debug_code", + "v8_enable_disassembler=$v8_enable_disassembler", + "v8_enable_gdbjit=$v8_enable_gdbjit", "v8_enable_i18n_support=$v8_enable_i18n_support", - "v8_enable_verify_predictable=$v8_enable_verify_predictable", - "v8_enable_verify_csa=$v8_enable_verify_csa", "v8_enable_lite_mode=$v8_enable_lite_mode", - "v8_enable_runtime_call_stats=$v8_enable_runtime_call_stats", + "v8_enable_maglev=$v8_enable_maglev", "v8_enable_pointer_compression=$v8_enable_pointer_compression", "v8_enable_pointer_compression_shared_cage=" + "$v8_enable_pointer_compression_shared_cage", + "v8_enable_runtime_call_stats=$v8_enable_runtime_call_stats", "v8_enable_sandbox=$v8_enable_sandbox", "v8_enable_shared_ro_heap=$v8_enable_shared_ro_heap", - "v8_disable_write_barriers=$v8_disable_write_barriers", - "v8_enable_third_party_heap=$v8_enable_third_party_heap", - "v8_enable_webassembly=$v8_enable_webassembly", - "v8_dict_property_const_tracking=$v8_dict_property_const_tracking", - "v8_control_flow_integrity=$v8_control_flow_integrity", - "v8_target_cpu=\"$v8_target_cpu\"", - "v8_enable_cet_shadow_stack=$v8_enable_cet_shadow_stack", - "v8_enable_verify_heap=$v8_enable_verify_heap", + "v8_enable_single_generation=$v8_enable_single_generation", "v8_enable_slow_dchecks=$v8_enable_slow_dchecks", + "v8_enable_third_party_heap=$v8_enable_third_party_heap", + "v8_enable_verify_csa=$v8_enable_verify_csa", + "v8_enable_verify_heap=$v8_enable_verify_heap", + "v8_enable_verify_predictable=$v8_enable_verify_predictable", + "v8_enable_webassembly=$v8_enable_webassembly", + "v8_target_cpu=\"$v8_target_cpu\"", ] if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { diff --git a/bazel/defs.bzl b/bazel/defs.bzl index 870e7268df..facf35803e 100644 --- a/bazel/defs.bzl +++ b/bazel/defs.bzl @@ -535,6 +535,14 @@ def build_config_content(cpu, 
icu): ("v8_enable_shared_ro_heap", "false"), ("v8_disable_write_barriers", "false"), ("v8_target_cpu", cpu), + ("v8_code_comments", "false"), + ("v8_enable_debug_code", "false"), + ("v8_enable_verify_heap", "false"), + ("v8_enable_slow_dchecks", "false"), + ("v8_enable_maglev", "false"), + ("v8_enable_disassembler", "false"), + ("is_DEBUG_defined", "false"), + ("v8_enable_gdbjit", "false"), ]) # TODO(victorgomes): Create a rule (instead of a macro), that can diff --git a/src/flags/flag-definitions.h b/src/flags/flag-definitions.h index b795acb382..4659c2df73 100644 --- a/src/flags/flag-definitions.h +++ b/src/flags/flag-definitions.h @@ -42,12 +42,18 @@ #elif defined(FLAG_MODE_DEFINE_DEFAULTS) #define FLAG_FULL(ftype, ctype, nam, def, cmt) \ static constexpr ctype FLAGDEFAULT_##nam{def}; +#define FLAG_READONLY(ftype, ctype, nam, def, cmt) \ + static constexpr ctype FLAGDEFAULT_##nam{def}; // We want to write entries into our meta data table, for internal parsing and -// printing / etc in the flag parser code. We only do this for writable flags. +// printing / etc in the flag parser code. #elif defined(FLAG_MODE_META) #define FLAG_FULL(ftype, ctype, nam, def, cmt) \ {Flag::TYPE_##ftype, #nam, &v8_flags.nam, &FLAGDEFAULT_##nam, cmt, false}, +// Readonly flags don't pass the value pointer since the struct expects a +// mutable value. That's okay since the value always equals the default. 
+#define FLAG_READONLY(ftype, ctype, nam, def, cmt) \ + {Flag::TYPE_##ftype, #nam, nullptr, &FLAGDEFAULT_##nam, cmt, false}, #define FLAG_ALIAS(ftype, ctype, alias, nam) \ {Flag::TYPE_##ftype, #alias, &v8_flags.nam, &FLAGDEFAULT_##nam, \ "alias for --" #nam, false}, // NOLINT(whitespace/indent) @@ -56,20 +62,20 @@ #elif defined(FLAG_MODE_DEFINE_IMPLICATIONS) #define DEFINE_VALUE_IMPLICATION(whenflag, thenflag, value) \ changed |= TriggerImplication(v8_flags.whenflag, #whenflag, \ - &v8_flags.thenflag, value, false); + &v8_flags.thenflag, #thenflag, value, false); // A weak implication will be overwritten by a normal implication or by an // explicit flag. #define DEFINE_WEAK_VALUE_IMPLICATION(whenflag, thenflag, value) \ changed |= TriggerImplication(v8_flags.whenflag, #whenflag, \ - &v8_flags.thenflag, value, true); + &v8_flags.thenflag, #thenflag, value, true); #define DEFINE_GENERIC_IMPLICATION(whenflag, statement) \ if (v8_flags.whenflag) statement; #define DEFINE_NEG_VALUE_IMPLICATION(whenflag, thenflag, value) \ changed |= TriggerImplication(!v8_flags.whenflag, "!" #whenflag, \ - &v8_flags.thenflag, value, false); + &v8_flags.thenflag, #thenflag, value, false); // We apply a generic macro to the flags. #elif defined(FLAG_MODE_APPLY) @@ -772,6 +778,7 @@ DEFINE_BOOL( stress_concurrent_inlining, false, "create additional concurrent optimization jobs but throw away result") DEFINE_IMPLICATION(stress_concurrent_inlining, concurrent_recompilation) +DEFINE_IMPLICATION(stress_concurrent_inlining, turbofan) DEFINE_NEG_IMPLICATION(stress_concurrent_inlining, lazy_feedback_allocation) DEFINE_WEAK_VALUE_IMPLICATION(stress_concurrent_inlining, interrupt_budget, 15 * KB) @@ -2246,18 +2253,17 @@ DEFINE_NEG_IMPLICATION(perf_prof, compact_code_space) DEFINE_NEG_IMPLICATION(perf_prof, write_protect_code_memory) // --perf-prof-unwinding-info is available only on selected architectures. 
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \ - !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64 -#undef DEFINE_PERF_PROF_BOOL -#define DEFINE_PERF_PROF_BOOL(nam, cmt) DEFINE_BOOL_READONLY(nam, false, cmt) -#undef DEFINE_PERF_PROF_IMPLICATION -#define DEFINE_PERF_PROF_IMPLICATION(...) -#endif - +#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \ + V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_PPC64 DEFINE_PERF_PROF_BOOL( perf_prof_unwinding_info, "Enable unwinding info for perf linux profiler (experimental).") DEFINE_PERF_PROF_IMPLICATION(perf_prof, perf_prof_unwinding_info) +#else +DEFINE_BOOL_READONLY( + perf_prof_unwinding_info, false, + "Enable unwinding info for perf linux profiler (experimental).") +#endif #undef DEFINE_PERF_PROF_BOOL #undef DEFINE_PERF_PROF_IMPLICATION diff --git a/src/flags/flags.cc b/src/flags/flags.cc index ab66eca43b..2b9ae1dffe 100644 --- a/src/flags/flags.cc +++ b/src/flags/flags.cc @@ -44,6 +44,7 @@ static_assert(sizeof(FlagValues) % kMinimumOSPageSize == 0); // Define all of our flags default values. #define FLAG_MODE_DEFINE_DEFAULTS #include "src/flags/flag-definitions.h" // NOLINT(build/include) +#undef FLAG_MODE_DEFINE_DEFAULTS namespace { @@ -91,6 +92,10 @@ struct Flag { enum class SetBy { kDefault, kWeakImplication, kImplication, kCommandLine }; + constexpr bool IsAnyImplication(Flag::SetBy set_by) { + return set_by == SetBy::kWeakImplication || set_by == SetBy::kImplication; + } + FlagType type_; // What type of flag, bool, int, or string. const char* name_; // Name of the flag, ex "my_flag". void* valptr_; // Pointer to the global flag variable. 
@@ -178,39 +183,44 @@ struct Flag { } } + template + T GetDefaultValue() const { + return *reinterpret_cast(defptr_); + } + bool bool_default() const { DCHECK_EQ(TYPE_BOOL, type_); - return *reinterpret_cast(defptr_); + return GetDefaultValue(); } int int_default() const { DCHECK_EQ(TYPE_INT, type_); - return *reinterpret_cast(defptr_); + return GetDefaultValue(); } unsigned int uint_default() const { DCHECK_EQ(TYPE_UINT, type_); - return *reinterpret_cast(defptr_); + return GetDefaultValue(); } uint64_t uint64_default() const { DCHECK_EQ(TYPE_UINT64, type_); - return *reinterpret_cast(defptr_); + return GetDefaultValue(); } double float_default() const { DCHECK_EQ(TYPE_FLOAT, type_); - return *reinterpret_cast(defptr_); + return GetDefaultValue(); } size_t size_t_default() const { DCHECK_EQ(TYPE_SIZE_T, type_); - return *reinterpret_cast(defptr_); + return GetDefaultValue(); } const char* string_default() const { DCHECK_EQ(TYPE_STRING, type_); - return *reinterpret_cast(defptr_); + return GetDefaultValue(); } static bool ShouldCheckFlagContradictions() { @@ -244,6 +254,19 @@ struct Flag { MSVC_SUPPRESS_WARNING(4722) ~FatalError() { FATAL("%s.\n%s", str().c_str(), kHint); } }; + // Readonly flags cannot change value. + if (change_flag && IsReadOnly()) { + // Exit instead of abort for certain testing situations. + if (v8_flags.exit_on_contradictory_flags) base::OS::ExitProcess(0); + if (implied_by == nullptr) { + FatalError{} << "Contradictory value for readonly flag " + << FlagName{name()}; + } else { + DCHECK(IsAnyImplication(new_set_by)); + FatalError{} << "Contradictory value for readonly flag " + << FlagName{name()} << " implied by " << implied_by; + } + } // For bool flags, we only check for a conflict if the value actually // changes. So specifying the same flag with the same value multiple times // is allowed. @@ -302,28 +325,39 @@ struct Flag { break; } } + if (change_flag && IsReadOnly()) { + // Readonly flags must never change value. 
+ return false; + } set_by_ = new_set_by; - if (new_set_by == SetBy::kImplication || - new_set_by == SetBy::kWeakImplication) { + if (IsAnyImplication(new_set_by)) { DCHECK_NOT_NULL(implied_by); implied_by_ = implied_by; } return change_flag; } + bool IsReadOnly() const { + // See the FLAG_READONLY definition for FLAG_MODE_META. + return valptr_ == nullptr; + } + template T GetValue() const { DCHECK_EQ(flag_type, type_); + if (IsReadOnly()) return GetDefaultValue(); return *reinterpret_cast*>(valptr_); } template void SetValue(T new_value, SetBy set_by) { DCHECK_EQ(flag_type, type_); - auto* flag_value = reinterpret_cast*>(valptr_); - bool change_flag = flag_value->value() != new_value; + bool change_flag = GetValue() != new_value; change_flag = CheckFlagChange(set_by, change_flag); - if (change_flag) *flag_value = new_value; + if (change_flag) { + DCHECK(!IsReadOnly()); + *reinterpret_cast*>(valptr_) = new_value; + } } // Compare this flag's current value against the default. @@ -395,6 +429,7 @@ struct Flag { Flag flags[] = { #define FLAG_MODE_META #include "src/flags/flag-definitions.h" // NOLINT(build/include) +#undef FLAG_MODE_META }; constexpr size_t kNumFlags = arraysize(flags); @@ -851,10 +886,11 @@ class ImplicationProcessor { // Called from {DEFINE_*_IMPLICATION} in flag-definitions.h. template bool TriggerImplication(bool premise, const char* premise_name, - FlagValue* conclusion_value, T value, + FlagValue* conclusion_value, + const char* conclusion_name, T value, bool weak_implication) { if (!premise) return false; - Flag* conclusion_flag = FindFlagByPointer(conclusion_value); + Flag* conclusion_flag = FindFlagByName(conclusion_name); if (!conclusion_flag->CheckFlagChange( weak_implication ? Flag::SetBy::kWeakImplication : Flag::SetBy::kImplication, @@ -873,6 +909,28 @@ class ImplicationProcessor { return true; } + // Called from {DEFINE_*_IMPLICATION} in flag-definitions.h. 
+ template + bool TriggerImplication(bool premise, const char* premise_name, + const FlagValue* conclusion_value, + const char* conclusion_name, T value, + bool weak_implication) { + if (!premise) return false; + Flag* conclusion_flag = FindFlagByName(conclusion_name); + // Because this is the `const FlagValue*` overload: + DCHECK(conclusion_flag->IsReadOnly()); + if (!conclusion_flag->CheckFlagChange( + weak_implication ? Flag::SetBy::kWeakImplication + : Flag::SetBy::kImplication, + conclusion_value->value() != value, premise_name)) { + return false; + } + // Must equal the default value, otherwise CheckFlagChange should've + // returned false. + DCHECK_EQ(value, conclusion_flag->GetDefaultValue()); + return true; + } + void CheckForCycle() { // Make sure flag implications reach a fixed point within // {kMaxNumIterations} iterations. diff --git a/src/flags/flags.h b/src/flags/flags.h index 690492f078..18446c78bf 100644 --- a/src/flags/flags.h +++ b/src/flags/flags.h @@ -66,6 +66,7 @@ struct alignas(kMinimumOSPageSize) FlagValues { #define FLAG_MODE_DECLARE #include "src/flags/flag-definitions.h" // NOLINT(build/include) +#undef FLAG_MODE_DECLARE }; V8_EXPORT_PRIVATE extern FlagValues v8_flags; diff --git a/src/runtime/runtime-test.cc b/src/runtime/runtime-test.cc index 7e2fa20a00..a7a8f64cae 100644 --- a/src/runtime/runtime-test.cc +++ b/src/runtime/runtime-test.cc @@ -265,7 +265,8 @@ bool CanOptimizeFunction(CodeKind target_kind, Handle function, return CrashUnlessFuzzingReturnFalse(isolate); } - if (!v8_flags.turbofan) return false; + if (target_kind == CodeKind::TURBOFAN && !v8_flags.turbofan) return false; + if (target_kind == CodeKind::MAGLEV && !v8_flags.maglev) return false; if (function->shared().optimization_disabled() && function->shared().disabled_optimization_reason() == diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status index 21b9724c18..a800b8fb7c 100644 --- a/test/mjsunit/mjsunit.status +++ b/test/mjsunit/mjsunit.status @@ 
-213,6 +213,12 @@ # Needs deterministic test helpers for concurrent maglev tiering. # TODO(jgruber,v8:7700): Implement ASAP. 'maglev/18': [SKIP], + + # --perf-prof is only available on Linux, and --perf-prof-unwinding-info only + # on selected architectures. + 'regress/wasm/regress-1032753': [PASS, ['system != linux', SKIP]], + 'regress/regress-913844': [PASS, + ['system != linux or arch not in (arm, arm64, x64, s390x, ppc64)', SKIP]], }], # ALWAYS ############################################################################## @@ -1324,11 +1330,9 @@ }], # no_harness ############################################################################## -['arch != x64 or not pointer_compression or variant in (nooptimization, jitless)', { - # Maglev is x64-only for now. - # TODO(v8:7700): Update as we extend support. +['not has_maglev', { 'maglev/*': [SKIP], -}], # arch != x64 or not pointer_compression or variant in (nooptimization, jitless) +}], # not has_maglev ############################################################################## ['arch != x64 or deopt_fuzzer', { diff --git a/tools/testrunner/base_runner.py b/tools/testrunner/base_runner.py index a88d6fec05..89bb8ae90e 100644 --- a/tools/testrunner/base_runner.py +++ b/tools/testrunner/base_runner.py @@ -552,6 +552,8 @@ class BaseTestRunner(object): sys.byteorder, "cfi_vptr": self.build_config.cfi_vptr, + "code_comments": + self.build_config.code_comments, "component_build": self.build_config.component_build, "conservative_stack_scanning": @@ -564,8 +566,12 @@ class BaseTestRunner(object): self.build_config.single_generation, "dcheck_always_on": self.build_config.dcheck_always_on, + "debug_code": + self.build_config.debug_code, "deopt_fuzzer": False, + "disassembler": + self.build_config.disassembler, "endurance_fuzzer": False, "gc_fuzzer": @@ -574,12 +580,23 @@ class BaseTestRunner(object): False, "gcov_coverage": self.build_config.gcov_coverage, + "gdbjit": + self.build_config.gdbjit, + # TODO(jgruber): Note this 
rename from maglev to has_maglev is required + # to avoid a name clash with the "maglev" variant. See also the TODO in + # statusfile.py (this really shouldn't be needed). + "has_maglev": + self.build_config.maglev, "has_webassembly": self.build_config.webassembly, "isolates": self.options.isolates, "is_clang": self.build_config.is_clang, + "is_debug": + self.build_config.is_debug, + "is_DEBUG_defined": + self.build_config.is_DEBUG_defined, "is_full_debug": self.build_config.is_full_debug, "interrupt_fuzzer": @@ -607,6 +624,8 @@ class BaseTestRunner(object): "simulator_run": self.build_config.simulator_run and not self.options.dont_skip_simulator_slow_tests, + "slow_dchecks": + self.build_config.slow_dchecks, "system": self.target_os, "third_party_heap": @@ -617,6 +636,8 @@ class BaseTestRunner(object): self.build_config.ubsan_vptr, "verify_csa": self.build_config.verify_csa, + "verify_heap": + self.build_config.verify_heap, "lite_mode": self.build_config.lite_mode, "pointer_compression": diff --git a/tools/testrunner/build_config.py b/tools/testrunner/build_config.py index cdc11681f8..10b5749c5f 100644 --- a/tools/testrunner/build_config.py +++ b/tools/testrunner/build_config.py @@ -23,40 +23,46 @@ class BuildConfig(object): self.asan = build_config['is_asan'] self.cfi_vptr = build_config['is_cfi'] + self.code_comments = build_config['v8_code_comments'] self.component_build = build_config['is_component_build'] + self.concurrent_marking = build_config['v8_enable_concurrent_marking'] self.conservative_stack_scanning = build_config[ 'v8_enable_conservative_stack_scanning'] self.control_flow_integrity = build_config['v8_control_flow_integrity'] - self.concurrent_marking = build_config['v8_enable_concurrent_marking'] - self.single_generation = build_config['v8_enable_single_generation'] self.dcheck_always_on = build_config['dcheck_always_on'] + self.debug_code = build_config['v8_enable_debug_code'] + self.dict_property_const_tracking = build_config[ + 
'v8_dict_property_const_tracking'] + self.disassembler = build_config['v8_enable_disassembler'] self.gcov_coverage = build_config['is_gcov_coverage'] + self.gdbjit = build_config['v8_enable_gdbjit'] self.is_android = build_config['is_android'] self.is_clang = build_config['is_clang'] self.is_debug = build_config['is_debug'] + self.is_DEBUG_defined = build_config['is_DEBUG_defined'] self.is_full_debug = build_config['is_full_debug'] + self.lite_mode = build_config['v8_enable_lite_mode'] + self.maglev = build_config['v8_enable_maglev'] self.msan = build_config['is_msan'] self.no_i18n = not build_config['v8_enable_i18n_support'] + self.pointer_compression = build_config['v8_enable_pointer_compression'] + self.pointer_compression_shared_cage = build_config[ + 'v8_enable_pointer_compression_shared_cage'] self.predictable = build_config['v8_enable_verify_predictable'] + self.sandbox = build_config['v8_enable_sandbox'] + self.shared_ro_heap = build_config['v8_enable_shared_ro_heap'] self.simulator_run = ( build_config['target_cpu'] != build_config['v8_target_cpu']) + self.single_generation = build_config['v8_enable_single_generation'] + self.slow_dchecks = build_config['v8_enable_slow_dchecks'] + self.third_party_heap = build_config['v8_enable_third_party_heap'] self.tsan = build_config['is_tsan'] # TODO(machenbach): We only have ubsan not ubsan_vptr. 
self.ubsan_vptr = build_config['is_ubsan_vptr'] self.verify_csa = build_config['v8_enable_verify_csa'] self.verify_heap = build_config['v8_enable_verify_heap'] - self.slow_dchecks = build_config['v8_enable_slow_dchecks'] - self.lite_mode = build_config['v8_enable_lite_mode'] - self.pointer_compression = build_config['v8_enable_pointer_compression'] - self.pointer_compression_shared_cage = build_config[ - 'v8_enable_pointer_compression_shared_cage'] - self.shared_ro_heap = build_config['v8_enable_shared_ro_heap'] - self.write_barriers = not build_config['v8_disable_write_barriers'] - self.sandbox = build_config['v8_enable_sandbox'] - self.third_party_heap = build_config['v8_enable_third_party_heap'] self.webassembly = build_config['v8_enable_webassembly'] - self.dict_property_const_tracking = build_config[ - 'v8_dict_property_const_tracking'] + self.write_barriers = not build_config['v8_disable_write_barriers'] # Export only for MIPS target if self.arch in ['mips64', 'mips64el']: self._mips_arch_variant = build_config['mips_arch_variant'] @@ -138,24 +144,31 @@ class BuildConfig(object): attrs = [ 'asan', 'cfi_vptr', + 'code_comments', 'control_flow_integrity', 'dcheck_always_on', + 'debug_code', + 'dict_property_const_tracking', + 'disassembler', 'gcov_coverage', + 'gdbjit', + 'is_debug', + 'is_DEBUG_defined', + 'lite_mode', + 'maglev', 'msan', 'no_i18n', + 'pointer_compression', + 'pointer_compression_shared_cage', 'predictable', + 'sandbox', + 'slow_dchecks', + 'third_party_heap', 'tsan', 'ubsan_vptr', 'verify_csa', - 'lite_mode', - 'pointer_compression', - 'pointer_compression_shared_cage', - 'sandbox', - 'third_party_heap', - 'webassembly', - 'dict_property_const_tracking', 'verify_heap', - 'slow_dchecks', + 'webassembly', ] detected_options = [attr for attr in attrs if getattr(self, attr, False)] return '\n'.join(detected_options) diff --git a/tools/testrunner/local/statusfile.py b/tools/testrunner/local/statusfile.py index 5f9766e85c..04485936b6 100644 --- 
a/tools/testrunner/local/statusfile.py +++ b/tools/testrunner/local/statusfile.py @@ -63,10 +63,12 @@ for var in [ "windows", "linux", "aix", "r1", "r2", "r3", "r5", "r6", "riscv32", "riscv64", "loong64" ]: + assert var not in VARIABLES VARIABLES[var] = var # Allow using variants as keywords. for var in ALL_VARIANTS: + assert var not in VARIABLES VARIABLES[var] = var class StatusFile(object): @@ -244,7 +246,16 @@ def ReadStatusFile(content, variables): prefix_rules = {variant: {} for variant in ALL_VARIANTS} prefix_rules[""] = {} - variables.update(VARIABLES) + # This method can be called with the same `variables` object multiple times. + # Ensure we only update `variables` (and check it for consistency) once. + if ALWAYS not in variables: + # Ensure we don't silently overwrite any build variables with our set of + # default keywords in VARIABLES. + for var in VARIABLES: + assert var not in variables, ( + "build_config variable '%s' conflicts with VARIABLES" % var) + variables.update(VARIABLES) + for conditional_section in ReadContent(content): assert type(conditional_section) == list assert len(conditional_section) == 2 diff --git a/tools/testrunner/local/variants.py b/tools/testrunner/local/variants.py index 31da71cf6d..d66f430c8b 100644 --- a/tools/testrunner/local/variants.py +++ b/tools/testrunner/local/variants.py @@ -58,23 +58,35 @@ ALL_VARIANT_FLAGS = { "google3_noicu": [[]], } +# Note these are specifically for the case when Turbofan is either fully +# disabled (i.e. not part of the binary), or when all codegen is disallowed (in +# jitless mode). +kIncompatibleFlagsForNoTurbofan = [ + "--turbofan", "--always-turbofan", "--liftoff", "--validate-asm", + "--maglev", "--stress-concurrent-inlining" +] + # Flags that lead to a contradiction with the flags provided by the respective # variant. This depends on the flags specified in ALL_VARIANT_FLAGS and on the # implications defined in flag-definitions.h. 
INCOMPATIBLE_FLAGS_PER_VARIANT = { - "jitless": [ - "--turbofan", "--always-turbofan", "--liftoff", "--track-field-types", - "--validate-asm", "--sparkplug", "--concurrent-sparkplug", "--maglev", - "--always-sparkplug", "--regexp-tier-up", "--no-regexp-interpret-all" + "jitless": + kIncompatibleFlagsForNoTurbofan + [ + "--track-field-types", "--sparkplug", "--concurrent-sparkplug", + "--always-sparkplug", "--regexp-tier-up", + "--no-regexp-interpret-all" + ], + "nooptimization": [ + "--turbofan", "--always-turbofan", "--stress-concurrent-inlining" ], - "nooptimization": ["--always-turbofan"], "slow_path": ["--no-force-slow-path"], "stress_concurrent_allocation": [ "--single-threaded", "--single-threaded-gc", "--predictable" ], "stress_concurrent_inlining": [ "--single-threaded", "--predictable", "--lazy-feedback-allocation", - "--assert-types", "--no-concurrent-recompilation" + "--assert-types", "--no-concurrent-recompilation", "--no-turbofan", + "--jitless" ], # The fast API tests initialize an embedder object that never needs to be # serialized to the snapshot, so we don't have a @@ -111,16 +123,55 @@ INCOMPATIBLE_FLAGS_PER_VARIANT = { # # applies when the code_comments build variable is NOT set. 
INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE = { - "lite_mode": ["--no-lazy-feedback-allocation", "--max-semi-space-size=*", - "--stress-concurrent-inlining"] - + INCOMPATIBLE_FLAGS_PER_VARIANT["jitless"], - "predictable": ["--parallel-compile-tasks-for-eager-toplevel", - "--parallel-compile-tasks-for-lazy", - "--concurrent-recompilation", - "--stress-concurrent-allocation", - "--stress-concurrent-inlining"], - "dict_property_const_tracking": [ - "--stress-concurrent-inlining"], + "!code_comments": ["--code-comments"], + "!is_DEBUG_defined": [ + "--check_handle_count", + "--code_stats", + "--dump_wasm_module", + "--enable_testing_opcode_in_wasm", + "--gc_verbose", + "--print_ast", + "--print_break_location", + "--print_global_handles", + "--print_handles", + "--print_scopes", + "--regexp_possessive_quantifier", + "--trace_backing_store", + "--trace_contexts", + "--trace_isolates", + "--trace_lazy", + "--trace_liftoff", + "--trace_module_status", + "--trace_normalization", + "--trace_turbo_escape", + "--trace_wasm_compiler", + "--trace_wasm_decoder", + "--trace_wasm_instances", + "--trace_wasm_interpreter", + "--trace_wasm_lazy_compilation", + "--trace_wasm_native_heap", + "--trace_wasm_serialization", + "--trace_wasm_stack_switching", + "--trace_wasm_streaming", + "--trap_on_abort", + ], + "!verify_heap": ["--verify-heap"], + "!debug_code": ["--debug-code"], + "!disassembler": [ + "--print_all_code", "--print_code", "--print_opt_code", + "--print_code_verbose", "--print_builtin_code", "--print_regexp_code" + ], + "!slow_dchecks": ["--enable-slow-asserts"], + "!gdbjit": ["--gdbjit", "--gdbjit_full", "--gdbjit_dump"], + "!maglev": ["--maglev"], + "lite_mode": ["--no-lazy-feedback-allocation", "--max-semi-space-size=*"] + + INCOMPATIBLE_FLAGS_PER_VARIANT["jitless"], + "predictable": [ + "--parallel-compile-tasks-for-eager-toplevel", + "--parallel-compile-tasks-for-lazy", "--concurrent-recompilation", + "--stress-concurrent-allocation", "--stress-concurrent-inlining" + ], + 
"dict_property_const_tracking": ["--stress-concurrent-inlining"], } # Flags that lead to a contradiction when a certain extra-flag is present. diff --git a/tools/testrunner/standard_runner_test.py b/tools/testrunner/standard_runner_test.py index 65d98b65ed..770a29bf88 100644 --- a/tools/testrunner/standard_runner_test.py +++ b/tools/testrunner/standard_runner_test.py @@ -231,18 +231,16 @@ class StandardRunnerTest(TestRunnerTest): v8_enable_sandbox=False ) ) - expect_text = ( - '>>> Autodetected:\n' - 'asan\n' - 'cfi_vptr\n' - 'dcheck_always_on\n' - 'msan\n' - 'no_i18n\n' - 'tsan\n' - 'ubsan_vptr\n' - 'webassembly\n' - '>>> Running tests for ia32.release') - result.stdout_includes(expect_text) + result.stdout_includes('>>> Autodetected:') + result.stdout_includes('asan') + result.stdout_includes('cfi_vptr') + result.stdout_includes('dcheck_always_on') + result.stdout_includes('msan') + result.stdout_includes('no_i18n') + result.stdout_includes('tsan') + result.stdout_includes('ubsan_vptr') + result.stdout_includes('webassembly') + result.stdout_includes('>>> Running tests for ia32.release') result.has_returncode(0) # TODO(machenbach): Test some more implications of the auto-detected # options, e.g. that the right env variables are set. 
diff --git a/tools/testrunner/testdata/testroot1/out/build/v8_build_config.json b/tools/testrunner/testdata/testroot1/out/build/v8_build_config.json index 9f1743780e..c4aa78f7f5 100644 --- a/tools/testrunner/testdata/testroot1/out/build/v8_build_config.json +++ b/tools/testrunner/testdata/testroot1/out/build/v8_build_config.json @@ -32,5 +32,13 @@ "v8_enable_single_generation": false, "v8_enable_third_party_heap": false, "v8_enable_webassembly": true, - "v8_dict_property_const_tracking": false + "v8_dict_property_const_tracking": false, + "v8_code_comments": false, + "v8_enable_debug_code": false, + "v8_enable_verify_heap": false, + "v8_enable_slow_dchecks": false, + "v8_enable_maglev": false, + "v8_enable_disassembler": false, + "is_DEBUG_defined": false, + "v8_enable_gdbjit": false } diff --git a/tools/testrunner/testdata/testroot2/out/build/v8_build_config.json b/tools/testrunner/testdata/testroot2/out/build/v8_build_config.json index a0b2cb87b4..3ad5534b05 100644 --- a/tools/testrunner/testdata/testroot2/out/build/v8_build_config.json +++ b/tools/testrunner/testdata/testroot2/out/build/v8_build_config.json @@ -32,5 +32,13 @@ "v8_enable_single_generation": false, "v8_enable_third_party_heap": false, "v8_enable_webassembly": true, - "v8_dict_property_const_tracking": false + "v8_dict_property_const_tracking": false, + "v8_code_comments": false, + "v8_enable_debug_code": false, + "v8_enable_verify_heap": false, + "v8_enable_slow_dchecks": false, + "v8_enable_maglev": false, + "v8_enable_disassembler": false, + "is_DEBUG_defined": false, + "v8_enable_gdbjit": false } diff --git a/tools/testrunner/testdata/testroot3/out/build/v8_build_config.json b/tools/testrunner/testdata/testroot3/out/build/v8_build_config.json index 9f1743780e..c4aa78f7f5 100644 --- a/tools/testrunner/testdata/testroot3/out/build/v8_build_config.json +++ b/tools/testrunner/testdata/testroot3/out/build/v8_build_config.json @@ -32,5 +32,13 @@ "v8_enable_single_generation": false, 
"v8_enable_third_party_heap": false, "v8_enable_webassembly": true, - "v8_dict_property_const_tracking": false + "v8_dict_property_const_tracking": false, + "v8_code_comments": false, + "v8_enable_debug_code": false, + "v8_enable_verify_heap": false, + "v8_enable_slow_dchecks": false, + "v8_enable_maglev": false, + "v8_enable_disassembler": false, + "is_DEBUG_defined": false, + "v8_enable_gdbjit": false } diff --git a/tools/testrunner/testdata/testroot5/out.gn/build/v8_build_config.json b/tools/testrunner/testdata/testroot5/out.gn/build/v8_build_config.json index 9f1743780e..c4aa78f7f5 100644 --- a/tools/testrunner/testdata/testroot5/out.gn/build/v8_build_config.json +++ b/tools/testrunner/testdata/testroot5/out.gn/build/v8_build_config.json @@ -32,5 +32,13 @@ "v8_enable_single_generation": false, "v8_enable_third_party_heap": false, "v8_enable_webassembly": true, - "v8_dict_property_const_tracking": false + "v8_dict_property_const_tracking": false, + "v8_code_comments": false, + "v8_enable_debug_code": false, + "v8_enable_verify_heap": false, + "v8_enable_slow_dchecks": false, + "v8_enable_maglev": false, + "v8_enable_disassembler": false, + "is_DEBUG_defined": false, + "v8_enable_gdbjit": false } diff --git a/tools/testrunner/testdata/testroot6/out/build/v8_build_config.json b/tools/testrunner/testdata/testroot6/out/build/v8_build_config.json index 9f1743780e..c4aa78f7f5 100644 --- a/tools/testrunner/testdata/testroot6/out/build/v8_build_config.json +++ b/tools/testrunner/testdata/testroot6/out/build/v8_build_config.json @@ -32,5 +32,13 @@ "v8_enable_single_generation": false, "v8_enable_third_party_heap": false, "v8_enable_webassembly": true, - "v8_dict_property_const_tracking": false + "v8_dict_property_const_tracking": false, + "v8_code_comments": false, + "v8_enable_debug_code": false, + "v8_enable_verify_heap": false, + "v8_enable_slow_dchecks": false, + "v8_enable_maglev": false, + "v8_enable_disassembler": false, + "is_DEBUG_defined": false, + 
"v8_enable_gdbjit": false } From ff2b5a672985c0d18d2871732dd4407fd107032d Mon Sep 17 00:00:00 2001 From: Jaroslav Sevcik Date: Thu, 5 Jan 2023 17:30:33 +0100 Subject: [PATCH 180/654] [inspector] Avoid sliding breakpoints for same scripts We change the breakpoint hint logic to check if the script has not locally changed (with a hash of the source text between the requested breakpoint location and the actual breakpoint location). If the text did not change, we set the breakpoint at the same location as before. Bug: chromium:1404643 Change-Id: I6ceecf9924e699aaf37518680d1cb79d3eb00959 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4138260 Commit-Queue: Jaroslav Sevcik Reviewed-by: Benedikt Meurer Cr-Commit-Position: refs/heads/main@{#85131} --- BUILD.bazel | 2 + src/inspector/BUILD.gn | 2 + src/inspector/crc32.cc | 85 ++++++++++++++ src/inspector/crc32.h | 16 +++ src/inspector/v8-debugger-agent-impl.cc | 111 +++++++++++++----- .../debugger/restore-breakpoint-expected.txt | 36 ++++++ test/inspector/debugger/restore-breakpoint.js | 17 +++ 7 files changed, 242 insertions(+), 27 deletions(-) create mode 100644 src/inspector/crc32.cc create mode 100644 src/inspector/crc32.h diff --git a/BUILD.bazel b/BUILD.bazel index d6792860e6..b84ca43f41 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -3255,6 +3255,8 @@ filegroup( filegroup( name = "v8_inspector_files", srcs = [ + "src/inspector/crc32.cc", + "src/inspector/crc32.h", "src/inspector/custom-preview.cc", "src/inspector/custom-preview.h", "src/inspector/injected-script.cc", diff --git a/src/inspector/BUILD.gn b/src/inspector/BUILD.gn index 6a6ff4b818..87ae628aa9 100644 --- a/src/inspector/BUILD.gn +++ b/src/inspector/BUILD.gn @@ -102,6 +102,8 @@ v8_source_set("inspector") { "../../include/v8-inspector.h", ] sources += [ + "crc32.cc", + "crc32.h", "custom-preview.cc", "custom-preview.h", "injected-script.cc", diff --git a/src/inspector/crc32.cc b/src/inspector/crc32.cc new file mode 100644 index 
0000000000..29d6460a0a --- /dev/null +++ b/src/inspector/crc32.cc @@ -0,0 +1,85 @@ +// Copyright 2023 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/inspector/crc32.h" + +#include "src/base/macros.h" + +namespace v8_inspector { + +// Generated from the polynomial 0xedb88320 using the following script: +// for i in range(0, 256): +// c = i ^ 0xff +// for j in range(0, 8): +// l = 0 if c & 1 else 0xedb88320 +// c = (c >> 1) ^ l +// print("0x%x" % (c)) +static uint32_t kCrcTable[256] = { + 0x0L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x76dc419L, + 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0xedb8832L, 0x79dcb8a4L, + 0xe0d5e91eL, 0x97d2d988L, 0x9b64c2bL, 0x7eb17cbdL, 0xe7b82d07L, + 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL, + 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L, + 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L, + 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L, + 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL, + 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L, + 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL, + 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L, + 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L, + 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L, + 0x1db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x6b6b51fL, + 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0xf00f934L, 0x9609a88eL, + 0xe10e9818L, 0x7f6a0dbbL, 0x86d3d2dL, 0x91646c97L, 0xe6635c01L, + 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL, + 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L, + 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L, + 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L, + 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 
0x4369e96aL, + 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L, + 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L, + 0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL, + 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L, + 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L, + 0x3b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x4db2615L, + 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0xd6d6a3eL, 0x7a6a5aa8L, + 0xe40ecf0bL, 0x9309ff9dL, 0xa00ae27L, 0x7d079eb1L, 0xf00f9344L, + 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL, + 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL, + 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L, + 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L, + 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL, + 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL, + 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L, + 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL, + 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L, + 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL, + 0x26d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x5005713L, + 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0xcb61b38L, 0x92d28e9bL, + 0xe5d5be0dL, 0x7cdcefb7L, 0xbdbdf21L, 0x86d3d2d4L, 0xf1d4e242L, + 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L, + 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL, + 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L, + 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L, + 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L, + 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L, + 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L, + 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L, + 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 
0xc30c8ea1L, 0x5a05df1bL, + 0x2d02ef8dL}; + +int32_t computeCrc32(const String16& text) { + const uint8_t* bytes = reinterpret_cast(text.characters16()); + size_t byteLength = sizeof(UChar) * text.length(); + + uint32_t checksum = 0; + for (size_t i = 0; i < byteLength; ++i) { + uint32_t index = (checksum ^ bytes[i]) & 0xff; + checksum = (checksum >> 8) ^ kCrcTable[index]; + } + + return v8::base::bit_cast(checksum); +} + +} // namespace v8_inspector diff --git a/src/inspector/crc32.h b/src/inspector/crc32.h new file mode 100644 index 0000000000..c20b56a660 --- /dev/null +++ b/src/inspector/crc32.h @@ -0,0 +1,16 @@ +// Copyright 2023 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_INSPECTOR_CRC32_H_ +#define V8_INSPECTOR_CRC32_H_ + +#include "src/inspector/string-16.h" + +namespace v8_inspector { + +int32_t computeCrc32(const String16&); + +} + +#endif // V8_INSPECTOR_CRC32_H_ diff --git a/src/inspector/v8-debugger-agent-impl.cc b/src/inspector/v8-debugger-agent-impl.cc index d6ed714ef7..9131dd9f02 100644 --- a/src/inspector/v8-debugger-agent-impl.cc +++ b/src/inspector/v8-debugger-agent-impl.cc @@ -14,6 +14,7 @@ #include "include/v8-microtask-queue.h" #include "src/base/safe_conversions.h" #include "src/debug/debug-interface.h" +#include "src/inspector/crc32.h" #include "src/inspector/injected-script.h" #include "src/inspector/inspected-context.h" #include "src/inspector/protocol/Debugger.h" @@ -55,6 +56,9 @@ static const char breakpointsByRegex[] = "breakpointsByRegex"; static const char breakpointsByUrl[] = "breakpointsByUrl"; static const char breakpointsByScriptHash[] = "breakpointsByScriptHash"; static const char breakpointHints[] = "breakpointHints"; +static const char breakpointHintText[] = "text"; +static const char breakpointHintPrefixHash[] = "prefixHash"; +static const char breakpointHintPrefixLength[] = "prefixLen"; static const char 
instrumentationBreakpoints[] = "instrumentationBreakpoints"; } // namespace DebuggerAgentState @@ -179,23 +183,46 @@ bool positionComparator(const std::pair& a, return a.second < b.second; } -String16 breakpointHint(const V8DebuggerScript& script, int lineNumber, - int columnNumber) { - int offset; - if (!script.offset(lineNumber, columnNumber).To(&offset)) return String16(); +std::unique_ptr breakpointHint( + const V8DebuggerScript& script, int breakpointLineNumber, + int breakpointColumnNumber, int actualLineNumber, int actualColumnNumber) { + int actualOffset; + int breakpointOffset; + if (!script.offset(actualLineNumber, actualColumnNumber).To(&actualOffset) || + !script.offset(breakpointLineNumber, breakpointColumnNumber) + .To(&breakpointOffset)) { + return {}; + } + + auto hintObject = protocol::DictionaryValue::create(); String16 hint = - script.source(offset, kBreakpointHintMaxLength).stripWhiteSpace(); + script.source(actualOffset, kBreakpointHintMaxLength).stripWhiteSpace(); for (size_t i = 0; i < hint.length(); ++i) { if (hint[i] == '\r' || hint[i] == '\n' || hint[i] == ';') { - return hint.substring(0, i); + hint = hint.substring(0, i); + break; } } - return hint; + hintObject->setString(DebuggerAgentState::breakpointHintText, hint); + + // Also store the hash of the text between the requested breakpoint location + // and the actual breakpoint location. If we see the same prefix text next + // time, we will keep the breakpoint at the same location (so that + // breakpoints do not slide around on reloads without any edits). 
+ if (breakpointOffset <= actualOffset) { + size_t length = actualOffset - breakpointOffset; + String16 prefix = script.source(breakpointOffset, length); + int crc32 = computeCrc32(prefix); + hintObject->setInteger(DebuggerAgentState::breakpointHintPrefixHash, crc32); + hintObject->setInteger(DebuggerAgentState::breakpointHintPrefixLength, + v8::base::checked_cast(length)); + } + return hintObject; } void adjustBreakpointLocation(const V8DebuggerScript& script, - const String16& hint, int* lineNumber, - int* columnNumber) { + const protocol::DictionaryValue* hintObject, + int* lineNumber, int* columnNumber) { if (*lineNumber < script.startLine() || *lineNumber > script.endLine()) return; if (*lineNumber == script.startLine() && @@ -206,15 +233,41 @@ void adjustBreakpointLocation(const V8DebuggerScript& script, return; } - if (hint.isEmpty()) return; int sourceOffset; if (!script.offset(*lineNumber, *columnNumber).To(&sourceOffset)) return; + int prefixLength = 0; + hintObject->getInteger(DebuggerAgentState::breakpointHintPrefixLength, + &prefixLength); + String16 hint; + if (!hintObject->getString(DebuggerAgentState::breakpointHintText, &hint) || + hint.isEmpty()) + return; + intptr_t searchRegionOffset = std::max( sourceOffset - kBreakpointHintMaxSearchOffset, static_cast(0)); size_t offset = sourceOffset - searchRegionOffset; - String16 searchArea = script.source(searchRegionOffset, - offset + kBreakpointHintMaxSearchOffset); + size_t searchRegionSize = + offset + std::max(kBreakpointHintMaxSearchOffset, + static_cast(prefixLength + hint.length())); + + String16 searchArea = script.source(searchRegionOffset, searchRegionSize); + + // Let us see if the breakpoint hint text appears at the same location + // as before, with the same prefix text in between. If yes, then we just use + // that position. 
+ int prefixHash; + if (hintObject->getInteger(DebuggerAgentState::breakpointHintPrefixHash, + &prefixHash) && + offset + prefixLength + hint.length() <= searchArea.length() && + searchArea.substring(offset + prefixLength, hint.length()) == hint && + computeCrc32(searchArea.substring(offset, prefixLength)) == prefixHash) { + v8::debug::Location hintPosition = + script.location(static_cast(offset + prefixLength)); + *lineNumber = hintPosition.GetLineNumber(); + *columnNumber = hintPosition.GetColumnNumber(); + return; + } size_t nextMatch = searchArea.find(hint, offset); size_t prevMatch = searchArea.reverseFind(hint, offset); @@ -222,7 +275,8 @@ void adjustBreakpointLocation(const V8DebuggerScript& script, return; } size_t bestMatch; - if (nextMatch == String16::kNotFound) { + if (nextMatch == String16::kNotFound || + nextMatch > kBreakpointHintMaxSearchOffset) { bestMatch = prevMatch; } else if (prevMatch == String16::kNotFound) { bestMatch = nextMatch; @@ -588,26 +642,30 @@ Response V8DebuggerAgentImpl::setBreakpointByUrl( "Breakpoint at specified location already exists."); } - String16 hint; + std::unique_ptr hint; for (const auto& script : m_scripts) { if (!matches(m_inspector, *script.second, type, selector)) continue; - if (!hint.isEmpty()) { - adjustBreakpointLocation(*script.second, hint, &lineNumber, - &columnNumber); + int adjustedLineNumber = lineNumber; + int adjustedColumnNumber = columnNumber; + if (hint) { + adjustBreakpointLocation(*script.second, hint.get(), &adjustedLineNumber, + &adjustedColumnNumber); } - std::unique_ptr location = setBreakpointImpl( - breakpointId, script.first, condition, lineNumber, columnNumber); + std::unique_ptr location = + setBreakpointImpl(breakpointId, script.first, condition, + adjustedLineNumber, adjustedColumnNumber); if (location && type != BreakpointType::kByUrlRegex) { - hint = breakpointHint(*script.second, location->getLineNumber(), - location->getColumnNumber(columnNumber)); + hint = 
breakpointHint(*script.second, lineNumber, columnNumber, + location->getLineNumber(), + location->getColumnNumber(adjustedColumnNumber)); } if (location) (*locations)->emplace_back(std::move(location)); } breakpoints->setString(breakpointId, condition); - if (!hint.isEmpty()) { + if (hint) { protocol::DictionaryValue* breakpointHints = getOrCreateObject(m_state, DebuggerAgentState::breakpointHints); - breakpointHints->setString(breakpointId, hint); + breakpointHints->setObject(breakpointId, std::move(hint)); } *outBreakpointId = breakpointId; return Response::Success(); @@ -1892,10 +1950,9 @@ void V8DebuggerAgentImpl::didParseSource( if (!matches(m_inspector, *scriptRef, type, selector)) continue; String16 condition; breakpointWithCondition.second->asString(&condition); - String16 hint; - bool hasHint = - breakpointHints && breakpointHints->getString(breakpointId, &hint); - if (hasHint) { + protocol::DictionaryValue* hint = + breakpointHints ? breakpointHints->getObject(breakpointId) : nullptr; + if (hint) { adjustBreakpointLocation(*scriptRef, hint, &lineNumber, &columnNumber); } std::unique_ptr location = diff --git a/test/inspector/debugger/restore-breakpoint-expected.txt b/test/inspector/debugger/restore-breakpoint-expected.txt index ac23487bf8..dbcf6b434e 100644 --- a/test/inspector/debugger/restore-breakpoint-expected.txt +++ b/test/inspector/debugger/restore-breakpoint-expected.txt @@ -8,6 +8,42 @@ function foo() { #boo(); } +Running test: testSameSourceDuplicateLines +function foo() { +boo(); +// something +#boo(); +} +function foo() { +boo(); +// something +#boo(); +} + +Running test: testSameSourceDuplicateLinesLongLineBetween +function foo() { +boo(); +/////////////////////////////////////////////////////////////////////////////... +#boo(); +} +function foo() { +boo(); +/////////////////////////////////////////////////////////////////////////////... 
+#boo(); +} + +Running test: testSameSourceDuplicateLinesDifferentPrefix +function foo() { +boo(); +// something +#boo(); +} +function foo() { +#boo(); +// somethinX +boo(); +} + Running test: testOneLineOffset function foo() { #boo(); diff --git a/test/inspector/debugger/restore-breakpoint.js b/test/inspector/debugger/restore-breakpoint.js index 020143f6d1..1767d93a78 100644 --- a/test/inspector/debugger/restore-breakpoint.js +++ b/test/inspector/debugger/restore-breakpoint.js @@ -12,6 +12,23 @@ InspectorTest.runTestSuite([ test(source, source, { lineNumber: 1, columnNumber: 0 }, next); }, + function testSameSourceDuplicateLines(next) { + var source = 'function foo() {\nboo();\n// something\nboo();\n}'; + test(source, source, { lineNumber: 2, columnNumber: 0 }, next); + }, + + function testSameSourceDuplicateLinesLongLineBetween(next) { + var longComment = '/'.repeat(1e4); + var source = `function foo() {\nboo();\n${longComment}\nboo();\n}`; + test(source, source, { lineNumber: 2, columnNumber: 0 }, next); + }, + + function testSameSourceDuplicateLinesDifferentPrefix(next) { + var source = 'function foo() {\nboo();\n// something\nboo();\n}'; + var newSource = 'function foo() {\nboo();\n// somethinX\nboo();\n}'; + test(source, newSource, { lineNumber: 2, columnNumber: 0 }, next); + }, + function testOneLineOffset(next) { var source = 'function foo() {\nboo();\n}'; var newSource = 'function foo() {\nboo();\nboo();\n}'; From b2123b6a6014b2a961192815be50f8b071845250 Mon Sep 17 00:00:00 2001 From: Andreas Haas Date: Thu, 5 Jan 2023 17:33:29 +0100 Subject: [PATCH 181/654] [d8] Add missing call to {ResetOnProfileEndListener} R=clemensb@chromium.org Bug: chromium:1405157 Change-Id: I01d7b1f85034501cdf0441103f4308dcd6f7234d Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4138252 Commit-Queue: Andreas Haas Reviewed-by: Clemens Backes Cr-Commit-Position: refs/heads/main@{#85132} --- src/d8/d8.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git 
a/src/d8/d8.cc b/src/d8/d8.cc index 70e6473d6d..a6f90c150c 100644 --- a/src/d8/d8.cc +++ b/src/d8/d8.cc @@ -2533,6 +2533,9 @@ bool Shell::HasOnProfileEndListener(Isolate* isolate) { } void Shell::ResetOnProfileEndListener(Isolate* isolate) { + // If the inspector is enabled, then the installed console is not the + // D8Console. + if (options.enable_inspector) return; profiler_end_callback_.erase(isolate); i::Isolate* i_isolate = reinterpret_cast(isolate); @@ -2970,6 +2973,7 @@ void Shell::QuitOnce(v8::FunctionCallbackInfo* args) { ->Int32Value(args->GetIsolate()->GetCurrentContext()) .FromMaybe(0); Isolate* isolate = args->GetIsolate(); + ResetOnProfileEndListener(isolate); isolate->Exit(); // As we exit the process anyway, we do not dispose the platform and other From cebcd8c51b3c7bcfb99d3db28d9f9a72ac307971 Mon Sep 17 00:00:00 2001 From: Leszek Swirski Date: Mon, 9 Jan 2023 08:48:09 +0000 Subject: [PATCH 182/654] Revert "[flags,testrunner] Consider readonly flags for conflict detection" This reverts commit ebd933037eb61bb3626675bdf2de800ba9f2518d. Reason for revert: Breaks a test: https://logs.chromium.org/logs/v8/buildbucket/cr-buildbucket/8792462319927467985/+/u/OptimizeForSize/CreateIsolateFromReadOnlySnapshot Original change's description: > [flags,testrunner] Consider readonly flags for conflict detection > > Flag conflict detection 1) bails out on incompatible flag values (e.g. > --jitless and --turbofan) and 2) handles such bailouts transparently in > the test runner by marking affected tests as OUTCOMES_FAIL. > > This CL adds full support for readonly flags to this system, together > with required additional annotations in variants.py. > > Drive-by: assert proper use of v8_enable_slow_dchecks, and add > support when dcheck_always_on is set. > Drive-by: introduce has_maglev build variable detection based on > v8_enable_maglev and use that for .status file annotations. > Drive-by: protect against unintended overwrites of build variables > in statusfile.py. 
> > Cq-Include-Trybots: luci.v8.try:v8_linux64_fyi_rel > Bug: v8:13629,v8:10577 > Change-Id: I04de399139a0490806df8bfee7e75e2ec767b4b5 > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4135879 > Reviewed-by: Tobias Tebbi > Reviewed-by: Victor Gomes > Commit-Queue: Jakob Linke > Cr-Commit-Position: refs/heads/main@{#85130} Bug: v8:13629,v8:10577 Change-Id: I0cb072c6c9f05d92894cc0af83c4d1a28df100d5 Cq-Include-Trybots: luci.v8.try:v8_linux64_fyi_rel No-Presubmit: true No-Tree-Checks: true No-Try: true Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4147098 Bot-Commit: Rubber Stamper Commit-Queue: Victor Gomes Reviewed-by: Victor Gomes Auto-Submit: Leszek Swirski Reviewed-by: Michael Achenbach Cr-Commit-Position: refs/heads/main@{#85133} --- BUILD.gn | 38 +++----- bazel/defs.bzl | 8 -- src/flags/flag-definitions.h | 30 +++---- src/flags/flags.cc | 86 +++---------------- src/flags/flags.h | 1 - src/runtime/runtime-test.cc | 3 +- test/mjsunit/mjsunit.status | 12 +-- tools/testrunner/base_runner.py | 21 ----- tools/testrunner/build_config.py | 55 +++++------- tools/testrunner/local/statusfile.py | 13 +-- tools/testrunner/local/variants.py | 83 ++++-------------- tools/testrunner/standard_runner_test.py | 22 ++--- .../testroot1/out/build/v8_build_config.json | 10 +-- .../testroot2/out/build/v8_build_config.json | 10 +-- .../testroot3/out/build/v8_build_config.json | 10 +-- .../out.gn/build/v8_build_config.json | 10 +-- .../testroot6/out/build/v8_build_config.json | 10 +-- 17 files changed, 100 insertions(+), 322 deletions(-) diff --git a/BUILD.gn b/BUILD.gn index c02d018caa..e7b5853d0c 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -516,10 +516,6 @@ assert(!v8_enable_trace_ignition || v8_enable_trace_unoptimized, "Ignition tracing requires unoptimized tracing to be enabled.") assert(!v8_enable_trace_baseline_exec || v8_enable_trace_unoptimized, "Baseline tracing requires unoptimized tracing to be enabled.") -assert( - v8_enable_debugging_features 
== true || dcheck_always_on || - !v8_enable_slow_dchecks, - "v8_enable_slow_dchecks requires v8_enable_debugging_features or dcheck_always_on.") if (v8_enable_short_builtin_calls && (!v8_enable_pointer_compression && v8_current_cpu != "x64")) { @@ -1298,12 +1294,13 @@ config("toolchain") { if ((is_linux || is_chromeos) && v8_enable_backtrace) { ldflags += [ "-rdynamic" ] } - } - if (v8_enable_debugging_features || dcheck_always_on) { + defines += [ "DEBUG" ] if (v8_enable_slow_dchecks) { defines += [ "ENABLE_SLOW_DCHECKS" ] } + } else if (dcheck_always_on) { + defines += [ "DEBUG" ] } if (v8_enable_verify_csa) { @@ -2312,7 +2309,6 @@ action("v8_dump_build_config") { script = "tools/testrunner/utils/dump_build_config.py" outputs = [ "$root_out_dir/v8_build_config.json" ] is_gcov_coverage = v8_code_coverage && !is_clang - is_DEBUG_defined = v8_enable_debugging_features || dcheck_always_on is_full_debug = v8_enable_debugging_features && !v8_optimized_debug args = [ rebase_path("$root_out_dir/v8_build_config.json", root_build_dir), @@ -2324,44 +2320,38 @@ action("v8_dump_build_config") { "is_clang=$is_clang", "is_component_build=$is_component_build", "is_debug=$v8_enable_debugging_features", - "is_DEBUG_defined=$is_DEBUG_defined", "is_full_debug=$is_full_debug", "is_gcov_coverage=$is_gcov_coverage", "is_msan=$is_msan", "is_tsan=$is_tsan", "is_ubsan_vptr=$is_ubsan_vptr", "target_cpu=\"$target_cpu\"", - "v8_code_comments=$v8_code_comments", - "v8_control_flow_integrity=$v8_control_flow_integrity", "v8_current_cpu=\"$v8_current_cpu\"", - "v8_dict_property_const_tracking=$v8_dict_property_const_tracking", - "v8_disable_write_barriers=$v8_disable_write_barriers", "v8_enable_atomic_object_field_writes=" + "$v8_enable_atomic_object_field_writes", - "v8_enable_cet_shadow_stack=$v8_enable_cet_shadow_stack", - "v8_enable_concurrent_marking=$v8_enable_concurrent_marking", "v8_enable_conservative_stack_scanning=" + "$v8_enable_conservative_stack_scanning", - 
"v8_enable_debug_code=$v8_enable_debug_code", - "v8_enable_disassembler=$v8_enable_disassembler", - "v8_enable_gdbjit=$v8_enable_gdbjit", + "v8_enable_concurrent_marking=$v8_enable_concurrent_marking", + "v8_enable_single_generation=$v8_enable_single_generation", "v8_enable_i18n_support=$v8_enable_i18n_support", + "v8_enable_verify_predictable=$v8_enable_verify_predictable", + "v8_enable_verify_csa=$v8_enable_verify_csa", "v8_enable_lite_mode=$v8_enable_lite_mode", - "v8_enable_maglev=$v8_enable_maglev", + "v8_enable_runtime_call_stats=$v8_enable_runtime_call_stats", "v8_enable_pointer_compression=$v8_enable_pointer_compression", "v8_enable_pointer_compression_shared_cage=" + "$v8_enable_pointer_compression_shared_cage", - "v8_enable_runtime_call_stats=$v8_enable_runtime_call_stats", "v8_enable_sandbox=$v8_enable_sandbox", "v8_enable_shared_ro_heap=$v8_enable_shared_ro_heap", - "v8_enable_single_generation=$v8_enable_single_generation", - "v8_enable_slow_dchecks=$v8_enable_slow_dchecks", + "v8_disable_write_barriers=$v8_disable_write_barriers", "v8_enable_third_party_heap=$v8_enable_third_party_heap", - "v8_enable_verify_csa=$v8_enable_verify_csa", - "v8_enable_verify_heap=$v8_enable_verify_heap", - "v8_enable_verify_predictable=$v8_enable_verify_predictable", "v8_enable_webassembly=$v8_enable_webassembly", + "v8_dict_property_const_tracking=$v8_dict_property_const_tracking", + "v8_control_flow_integrity=$v8_control_flow_integrity", "v8_target_cpu=\"$v8_target_cpu\"", + "v8_enable_cet_shadow_stack=$v8_enable_cet_shadow_stack", + "v8_enable_verify_heap=$v8_enable_verify_heap", + "v8_enable_slow_dchecks=$v8_enable_slow_dchecks", ] if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { diff --git a/bazel/defs.bzl b/bazel/defs.bzl index facf35803e..870e7268df 100644 --- a/bazel/defs.bzl +++ b/bazel/defs.bzl @@ -535,14 +535,6 @@ def build_config_content(cpu, icu): ("v8_enable_shared_ro_heap", "false"), ("v8_disable_write_barriers", "false"), ("v8_target_cpu", 
cpu), - ("v8_code_comments", "false"), - ("v8_enable_debug_code", "false"), - ("v8_enable_verify_heap", "false"), - ("v8_enable_slow_dchecks", "false"), - ("v8_enable_maglev", "false"), - ("v8_enable_disassembler", "false"), - ("is_DEBUG_defined", "false"), - ("v8_enable_gdbjit", "false"), ]) # TODO(victorgomes): Create a rule (instead of a macro), that can diff --git a/src/flags/flag-definitions.h b/src/flags/flag-definitions.h index 4659c2df73..b795acb382 100644 --- a/src/flags/flag-definitions.h +++ b/src/flags/flag-definitions.h @@ -42,18 +42,12 @@ #elif defined(FLAG_MODE_DEFINE_DEFAULTS) #define FLAG_FULL(ftype, ctype, nam, def, cmt) \ static constexpr ctype FLAGDEFAULT_##nam{def}; -#define FLAG_READONLY(ftype, ctype, nam, def, cmt) \ - static constexpr ctype FLAGDEFAULT_##nam{def}; // We want to write entries into our meta data table, for internal parsing and -// printing / etc in the flag parser code. +// printing / etc in the flag parser code. We only do this for writable flags. #elif defined(FLAG_MODE_META) #define FLAG_FULL(ftype, ctype, nam, def, cmt) \ {Flag::TYPE_##ftype, #nam, &v8_flags.nam, &FLAGDEFAULT_##nam, cmt, false}, -// Readonly flags don't pass the value pointer since the struct expects a -// mutable value. That's okay since the value always equals the default. -#define FLAG_READONLY(ftype, ctype, nam, def, cmt) \ - {Flag::TYPE_##ftype, #nam, nullptr, &FLAGDEFAULT_##nam, cmt, false}, #define FLAG_ALIAS(ftype, ctype, alias, nam) \ {Flag::TYPE_##ftype, #alias, &v8_flags.nam, &FLAGDEFAULT_##nam, \ "alias for --" #nam, false}, // NOLINT(whitespace/indent) @@ -62,20 +56,20 @@ #elif defined(FLAG_MODE_DEFINE_IMPLICATIONS) #define DEFINE_VALUE_IMPLICATION(whenflag, thenflag, value) \ changed |= TriggerImplication(v8_flags.whenflag, #whenflag, \ - &v8_flags.thenflag, #thenflag, value, false); + &v8_flags.thenflag, value, false); // A weak implication will be overwritten by a normal implication or by an // explicit flag. 
#define DEFINE_WEAK_VALUE_IMPLICATION(whenflag, thenflag, value) \ changed |= TriggerImplication(v8_flags.whenflag, #whenflag, \ - &v8_flags.thenflag, #thenflag, value, true); + &v8_flags.thenflag, value, true); #define DEFINE_GENERIC_IMPLICATION(whenflag, statement) \ if (v8_flags.whenflag) statement; #define DEFINE_NEG_VALUE_IMPLICATION(whenflag, thenflag, value) \ changed |= TriggerImplication(!v8_flags.whenflag, "!" #whenflag, \ - &v8_flags.thenflag, #thenflag, value, false); + &v8_flags.thenflag, value, false); // We apply a generic macro to the flags. #elif defined(FLAG_MODE_APPLY) @@ -778,7 +772,6 @@ DEFINE_BOOL( stress_concurrent_inlining, false, "create additional concurrent optimization jobs but throw away result") DEFINE_IMPLICATION(stress_concurrent_inlining, concurrent_recompilation) -DEFINE_IMPLICATION(stress_concurrent_inlining, turbofan) DEFINE_NEG_IMPLICATION(stress_concurrent_inlining, lazy_feedback_allocation) DEFINE_WEAK_VALUE_IMPLICATION(stress_concurrent_inlining, interrupt_budget, 15 * KB) @@ -2253,17 +2246,18 @@ DEFINE_NEG_IMPLICATION(perf_prof, compact_code_space) DEFINE_NEG_IMPLICATION(perf_prof, write_protect_code_memory) // --perf-prof-unwinding-info is available only on selected architectures. -#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \ - V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_PPC64 +#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \ + !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64 +#undef DEFINE_PERF_PROF_BOOL +#define DEFINE_PERF_PROF_BOOL(nam, cmt) DEFINE_BOOL_READONLY(nam, false, cmt) +#undef DEFINE_PERF_PROF_IMPLICATION +#define DEFINE_PERF_PROF_IMPLICATION(...) 
+#endif + DEFINE_PERF_PROF_BOOL( perf_prof_unwinding_info, "Enable unwinding info for perf linux profiler (experimental).") DEFINE_PERF_PROF_IMPLICATION(perf_prof, perf_prof_unwinding_info) -#else -DEFINE_BOOL_READONLY( - perf_prof_unwinding_info, false, - "Enable unwinding info for perf linux profiler (experimental).") -#endif #undef DEFINE_PERF_PROF_BOOL #undef DEFINE_PERF_PROF_IMPLICATION diff --git a/src/flags/flags.cc b/src/flags/flags.cc index 2b9ae1dffe..ab66eca43b 100644 --- a/src/flags/flags.cc +++ b/src/flags/flags.cc @@ -44,7 +44,6 @@ static_assert(sizeof(FlagValues) % kMinimumOSPageSize == 0); // Define all of our flags default values. #define FLAG_MODE_DEFINE_DEFAULTS #include "src/flags/flag-definitions.h" // NOLINT(build/include) -#undef FLAG_MODE_DEFINE_DEFAULTS namespace { @@ -92,10 +91,6 @@ struct Flag { enum class SetBy { kDefault, kWeakImplication, kImplication, kCommandLine }; - constexpr bool IsAnyImplication(Flag::SetBy set_by) { - return set_by == SetBy::kWeakImplication || set_by == SetBy::kImplication; - } - FlagType type_; // What type of flag, bool, int, or string. const char* name_; // Name of the flag, ex "my_flag". void* valptr_; // Pointer to the global flag variable. 
@@ -183,44 +178,39 @@ struct Flag { } } - template - T GetDefaultValue() const { - return *reinterpret_cast(defptr_); - } - bool bool_default() const { DCHECK_EQ(TYPE_BOOL, type_); - return GetDefaultValue(); + return *reinterpret_cast(defptr_); } int int_default() const { DCHECK_EQ(TYPE_INT, type_); - return GetDefaultValue(); + return *reinterpret_cast(defptr_); } unsigned int uint_default() const { DCHECK_EQ(TYPE_UINT, type_); - return GetDefaultValue(); + return *reinterpret_cast(defptr_); } uint64_t uint64_default() const { DCHECK_EQ(TYPE_UINT64, type_); - return GetDefaultValue(); + return *reinterpret_cast(defptr_); } double float_default() const { DCHECK_EQ(TYPE_FLOAT, type_); - return GetDefaultValue(); + return *reinterpret_cast(defptr_); } size_t size_t_default() const { DCHECK_EQ(TYPE_SIZE_T, type_); - return GetDefaultValue(); + return *reinterpret_cast(defptr_); } const char* string_default() const { DCHECK_EQ(TYPE_STRING, type_); - return GetDefaultValue(); + return *reinterpret_cast(defptr_); } static bool ShouldCheckFlagContradictions() { @@ -254,19 +244,6 @@ struct Flag { MSVC_SUPPRESS_WARNING(4722) ~FatalError() { FATAL("%s.\n%s", str().c_str(), kHint); } }; - // Readonly flags cannot change value. - if (change_flag && IsReadOnly()) { - // Exit instead of abort for certain testing situations. - if (v8_flags.exit_on_contradictory_flags) base::OS::ExitProcess(0); - if (implied_by == nullptr) { - FatalError{} << "Contradictory value for readonly flag " - << FlagName{name()}; - } else { - DCHECK(IsAnyImplication(new_set_by)); - FatalError{} << "Contradictory value for readonly flag " - << FlagName{name()} << " implied by " << implied_by; - } - } // For bool flags, we only check for a conflict if the value actually // changes. So specifying the same flag with the same value multiple times // is allowed. @@ -325,39 +302,28 @@ struct Flag { break; } } - if (change_flag && IsReadOnly()) { - // Readonly flags must never change value. 
- return false; - } set_by_ = new_set_by; - if (IsAnyImplication(new_set_by)) { + if (new_set_by == SetBy::kImplication || + new_set_by == SetBy::kWeakImplication) { DCHECK_NOT_NULL(implied_by); implied_by_ = implied_by; } return change_flag; } - bool IsReadOnly() const { - // See the FLAG_READONLY definition for FLAG_MODE_META. - return valptr_ == nullptr; - } - template T GetValue() const { DCHECK_EQ(flag_type, type_); - if (IsReadOnly()) return GetDefaultValue(); return *reinterpret_cast*>(valptr_); } template void SetValue(T new_value, SetBy set_by) { DCHECK_EQ(flag_type, type_); - bool change_flag = GetValue() != new_value; + auto* flag_value = reinterpret_cast*>(valptr_); + bool change_flag = flag_value->value() != new_value; change_flag = CheckFlagChange(set_by, change_flag); - if (change_flag) { - DCHECK(!IsReadOnly()); - *reinterpret_cast*>(valptr_) = new_value; - } + if (change_flag) *flag_value = new_value; } // Compare this flag's current value against the default. @@ -429,7 +395,6 @@ struct Flag { Flag flags[] = { #define FLAG_MODE_META #include "src/flags/flag-definitions.h" // NOLINT(build/include) -#undef FLAG_MODE_META }; constexpr size_t kNumFlags = arraysize(flags); @@ -886,11 +851,10 @@ class ImplicationProcessor { // Called from {DEFINE_*_IMPLICATION} in flag-definitions.h. template bool TriggerImplication(bool premise, const char* premise_name, - FlagValue* conclusion_value, - const char* conclusion_name, T value, + FlagValue* conclusion_value, T value, bool weak_implication) { if (!premise) return false; - Flag* conclusion_flag = FindFlagByName(conclusion_name); + Flag* conclusion_flag = FindFlagByPointer(conclusion_value); if (!conclusion_flag->CheckFlagChange( weak_implication ? Flag::SetBy::kWeakImplication : Flag::SetBy::kImplication, @@ -909,28 +873,6 @@ class ImplicationProcessor { return true; } - // Called from {DEFINE_*_IMPLICATION} in flag-definitions.h. 
- template - bool TriggerImplication(bool premise, const char* premise_name, - const FlagValue* conclusion_value, - const char* conclusion_name, T value, - bool weak_implication) { - if (!premise) return false; - Flag* conclusion_flag = FindFlagByName(conclusion_name); - // Because this is the `const FlagValue*` overload: - DCHECK(conclusion_flag->IsReadOnly()); - if (!conclusion_flag->CheckFlagChange( - weak_implication ? Flag::SetBy::kWeakImplication - : Flag::SetBy::kImplication, - conclusion_value->value() != value, premise_name)) { - return false; - } - // Must equal the default value, otherwise CheckFlagChange should've - // returned false. - DCHECK_EQ(value, conclusion_flag->GetDefaultValue()); - return true; - } - void CheckForCycle() { // Make sure flag implications reach a fixed point within // {kMaxNumIterations} iterations. diff --git a/src/flags/flags.h b/src/flags/flags.h index 18446c78bf..690492f078 100644 --- a/src/flags/flags.h +++ b/src/flags/flags.h @@ -66,7 +66,6 @@ struct alignas(kMinimumOSPageSize) FlagValues { #define FLAG_MODE_DECLARE #include "src/flags/flag-definitions.h" // NOLINT(build/include) -#undef FLAG_MODE_DECLARE }; V8_EXPORT_PRIVATE extern FlagValues v8_flags; diff --git a/src/runtime/runtime-test.cc b/src/runtime/runtime-test.cc index a7a8f64cae..7e2fa20a00 100644 --- a/src/runtime/runtime-test.cc +++ b/src/runtime/runtime-test.cc @@ -265,8 +265,7 @@ bool CanOptimizeFunction(CodeKind target_kind, Handle function, return CrashUnlessFuzzingReturnFalse(isolate); } - if (target_kind == CodeKind::TURBOFAN && !v8_flags.turbofan) return false; - if (target_kind == CodeKind::MAGLEV && !v8_flags.maglev) return false; + if (!v8_flags.turbofan) return false; if (function->shared().optimization_disabled() && function->shared().disabled_optimization_reason() == diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status index a800b8fb7c..21b9724c18 100644 --- a/test/mjsunit/mjsunit.status +++ b/test/mjsunit/mjsunit.status @@ 
-213,12 +213,6 @@ # Needs deterministic test helpers for concurrent maglev tiering. # TODO(jgruber,v8:7700): Implement ASAP. 'maglev/18': [SKIP], - - # --perf-prof is only available on Linux, and --perf-prof-unwinding-info only - # on selected architectures. - 'regress/wasm/regress-1032753': [PASS, ['system != linux', SKIP]], - 'regress/regress-913844': [PASS, - ['system != linux or arch not in (arm, arm64, x64, s390x, ppc64)', SKIP]], }], # ALWAYS ############################################################################## @@ -1330,9 +1324,11 @@ }], # no_harness ############################################################################## -['not has_maglev', { +['arch != x64 or not pointer_compression or variant in (nooptimization, jitless)', { + # Maglev is x64-only for now. + # TODO(v8:7700): Update as we extend support. 'maglev/*': [SKIP], -}], # not has_maglev +}], # arch != x64 or not pointer_compression or variant in (nooptimization, jitless) ############################################################################## ['arch != x64 or deopt_fuzzer', { diff --git a/tools/testrunner/base_runner.py b/tools/testrunner/base_runner.py index 89bb8ae90e..a88d6fec05 100644 --- a/tools/testrunner/base_runner.py +++ b/tools/testrunner/base_runner.py @@ -552,8 +552,6 @@ class BaseTestRunner(object): sys.byteorder, "cfi_vptr": self.build_config.cfi_vptr, - "code_comments": - self.build_config.code_comments, "component_build": self.build_config.component_build, "conservative_stack_scanning": @@ -566,12 +564,8 @@ class BaseTestRunner(object): self.build_config.single_generation, "dcheck_always_on": self.build_config.dcheck_always_on, - "debug_code": - self.build_config.debug_code, "deopt_fuzzer": False, - "disassembler": - self.build_config.disassembler, "endurance_fuzzer": False, "gc_fuzzer": @@ -580,23 +574,12 @@ class BaseTestRunner(object): False, "gcov_coverage": self.build_config.gcov_coverage, - "gdbjit": - self.build_config.gdbjit, - # TODO(jgruber): Note this 
rename from maglev to has_maglev is required - # to avoid a name clash with the "maglev" variant. See also the TODO in - # statusfile.py (this really shouldn't be needed). - "has_maglev": - self.build_config.maglev, "has_webassembly": self.build_config.webassembly, "isolates": self.options.isolates, "is_clang": self.build_config.is_clang, - "is_debug": - self.build_config.is_debug, - "is_DEBUG_defined": - self.build_config.is_DEBUG_defined, "is_full_debug": self.build_config.is_full_debug, "interrupt_fuzzer": @@ -624,8 +607,6 @@ class BaseTestRunner(object): "simulator_run": self.build_config.simulator_run and not self.options.dont_skip_simulator_slow_tests, - "slow_dchecks": - self.build_config.slow_dchecks, "system": self.target_os, "third_party_heap": @@ -636,8 +617,6 @@ class BaseTestRunner(object): self.build_config.ubsan_vptr, "verify_csa": self.build_config.verify_csa, - "verify_heap": - self.build_config.verify_heap, "lite_mode": self.build_config.lite_mode, "pointer_compression": diff --git a/tools/testrunner/build_config.py b/tools/testrunner/build_config.py index 10b5749c5f..cdc11681f8 100644 --- a/tools/testrunner/build_config.py +++ b/tools/testrunner/build_config.py @@ -23,46 +23,40 @@ class BuildConfig(object): self.asan = build_config['is_asan'] self.cfi_vptr = build_config['is_cfi'] - self.code_comments = build_config['v8_code_comments'] self.component_build = build_config['is_component_build'] - self.concurrent_marking = build_config['v8_enable_concurrent_marking'] self.conservative_stack_scanning = build_config[ 'v8_enable_conservative_stack_scanning'] self.control_flow_integrity = build_config['v8_control_flow_integrity'] + self.concurrent_marking = build_config['v8_enable_concurrent_marking'] + self.single_generation = build_config['v8_enable_single_generation'] self.dcheck_always_on = build_config['dcheck_always_on'] - self.debug_code = build_config['v8_enable_debug_code'] - self.dict_property_const_tracking = build_config[ - 
'v8_dict_property_const_tracking'] - self.disassembler = build_config['v8_enable_disassembler'] self.gcov_coverage = build_config['is_gcov_coverage'] - self.gdbjit = build_config['v8_enable_gdbjit'] self.is_android = build_config['is_android'] self.is_clang = build_config['is_clang'] self.is_debug = build_config['is_debug'] - self.is_DEBUG_defined = build_config['is_DEBUG_defined'] self.is_full_debug = build_config['is_full_debug'] - self.lite_mode = build_config['v8_enable_lite_mode'] - self.maglev = build_config['v8_enable_maglev'] self.msan = build_config['is_msan'] self.no_i18n = not build_config['v8_enable_i18n_support'] - self.pointer_compression = build_config['v8_enable_pointer_compression'] - self.pointer_compression_shared_cage = build_config[ - 'v8_enable_pointer_compression_shared_cage'] self.predictable = build_config['v8_enable_verify_predictable'] - self.sandbox = build_config['v8_enable_sandbox'] - self.shared_ro_heap = build_config['v8_enable_shared_ro_heap'] self.simulator_run = ( build_config['target_cpu'] != build_config['v8_target_cpu']) - self.single_generation = build_config['v8_enable_single_generation'] - self.slow_dchecks = build_config['v8_enable_slow_dchecks'] - self.third_party_heap = build_config['v8_enable_third_party_heap'] self.tsan = build_config['is_tsan'] # TODO(machenbach): We only have ubsan not ubsan_vptr. 
self.ubsan_vptr = build_config['is_ubsan_vptr'] self.verify_csa = build_config['v8_enable_verify_csa'] self.verify_heap = build_config['v8_enable_verify_heap'] - self.webassembly = build_config['v8_enable_webassembly'] + self.slow_dchecks = build_config['v8_enable_slow_dchecks'] + self.lite_mode = build_config['v8_enable_lite_mode'] + self.pointer_compression = build_config['v8_enable_pointer_compression'] + self.pointer_compression_shared_cage = build_config[ + 'v8_enable_pointer_compression_shared_cage'] + self.shared_ro_heap = build_config['v8_enable_shared_ro_heap'] self.write_barriers = not build_config['v8_disable_write_barriers'] + self.sandbox = build_config['v8_enable_sandbox'] + self.third_party_heap = build_config['v8_enable_third_party_heap'] + self.webassembly = build_config['v8_enable_webassembly'] + self.dict_property_const_tracking = build_config[ + 'v8_dict_property_const_tracking'] # Export only for MIPS target if self.arch in ['mips64', 'mips64el']: self._mips_arch_variant = build_config['mips_arch_variant'] @@ -144,31 +138,24 @@ class BuildConfig(object): attrs = [ 'asan', 'cfi_vptr', - 'code_comments', 'control_flow_integrity', 'dcheck_always_on', - 'debug_code', - 'dict_property_const_tracking', - 'disassembler', 'gcov_coverage', - 'gdbjit', - 'is_debug', - 'is_DEBUG_defined', - 'lite_mode', - 'maglev', 'msan', 'no_i18n', - 'pointer_compression', - 'pointer_compression_shared_cage', 'predictable', - 'sandbox', - 'slow_dchecks', - 'third_party_heap', 'tsan', 'ubsan_vptr', 'verify_csa', - 'verify_heap', + 'lite_mode', + 'pointer_compression', + 'pointer_compression_shared_cage', + 'sandbox', + 'third_party_heap', 'webassembly', + 'dict_property_const_tracking', + 'verify_heap', + 'slow_dchecks', ] detected_options = [attr for attr in attrs if getattr(self, attr, False)] return '\n'.join(detected_options) diff --git a/tools/testrunner/local/statusfile.py b/tools/testrunner/local/statusfile.py index 04485936b6..5f9766e85c 100644 --- 
a/tools/testrunner/local/statusfile.py +++ b/tools/testrunner/local/statusfile.py @@ -63,12 +63,10 @@ for var in [ "windows", "linux", "aix", "r1", "r2", "r3", "r5", "r6", "riscv32", "riscv64", "loong64" ]: - assert var not in VARIABLES VARIABLES[var] = var # Allow using variants as keywords. for var in ALL_VARIANTS: - assert var not in VARIABLES VARIABLES[var] = var class StatusFile(object): @@ -246,16 +244,7 @@ def ReadStatusFile(content, variables): prefix_rules = {variant: {} for variant in ALL_VARIANTS} prefix_rules[""] = {} - # This method can be called with the same `variables` object multiple times. - # Ensure we only update `variables` (and check it for consistency) once. - if ALWAYS not in variables: - # Ensure we don't silently overwrite any build variables with our set of - # default keywords in VARIABLES. - for var in VARIABLES: - assert var not in variables, ( - "build_config variable '%s' conflicts with VARIABLES" % var) - variables.update(VARIABLES) - + variables.update(VARIABLES) for conditional_section in ReadContent(content): assert type(conditional_section) == list assert len(conditional_section) == 2 diff --git a/tools/testrunner/local/variants.py b/tools/testrunner/local/variants.py index d66f430c8b..31da71cf6d 100644 --- a/tools/testrunner/local/variants.py +++ b/tools/testrunner/local/variants.py @@ -58,35 +58,23 @@ ALL_VARIANT_FLAGS = { "google3_noicu": [[]], } -# Note these are specifically for the case when Turbofan is either fully -# disabled (i.e. not part of the binary), or when all codegen is disallowed (in -# jitless mode). -kIncompatibleFlagsForNoTurbofan = [ - "--turbofan", "--always-turbofan", "--liftoff", "--validate-asm", - "--maglev", "--stress-concurrent-inlining" -] - # Flags that lead to a contradiction with the flags provided by the respective # variant. This depends on the flags specified in ALL_VARIANT_FLAGS and on the # implications defined in flag-definitions.h. 
INCOMPATIBLE_FLAGS_PER_VARIANT = { - "jitless": - kIncompatibleFlagsForNoTurbofan + [ - "--track-field-types", "--sparkplug", "--concurrent-sparkplug", - "--always-sparkplug", "--regexp-tier-up", - "--no-regexp-interpret-all" - ], - "nooptimization": [ - "--turbofan", "--always-turbofan", "--stress-concurrent-inlining" + "jitless": [ + "--turbofan", "--always-turbofan", "--liftoff", "--track-field-types", + "--validate-asm", "--sparkplug", "--concurrent-sparkplug", "--maglev", + "--always-sparkplug", "--regexp-tier-up", "--no-regexp-interpret-all" ], + "nooptimization": ["--always-turbofan"], "slow_path": ["--no-force-slow-path"], "stress_concurrent_allocation": [ "--single-threaded", "--single-threaded-gc", "--predictable" ], "stress_concurrent_inlining": [ "--single-threaded", "--predictable", "--lazy-feedback-allocation", - "--assert-types", "--no-concurrent-recompilation", "--no-turbofan", - "--jitless" + "--assert-types", "--no-concurrent-recompilation" ], # The fast API tests initialize an embedder object that never needs to be # serialized to the snapshot, so we don't have a @@ -123,55 +111,16 @@ INCOMPATIBLE_FLAGS_PER_VARIANT = { # # applies when the code_comments build variable is NOT set. 
INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE = { - "!code_comments": ["--code-comments"], - "!is_DEBUG_defined": [ - "--check_handle_count", - "--code_stats", - "--dump_wasm_module", - "--enable_testing_opcode_in_wasm", - "--gc_verbose", - "--print_ast", - "--print_break_location", - "--print_global_handles", - "--print_handles", - "--print_scopes", - "--regexp_possessive_quantifier", - "--trace_backing_store", - "--trace_contexts", - "--trace_isolates", - "--trace_lazy", - "--trace_liftoff", - "--trace_module_status", - "--trace_normalization", - "--trace_turbo_escape", - "--trace_wasm_compiler", - "--trace_wasm_decoder", - "--trace_wasm_instances", - "--trace_wasm_interpreter", - "--trace_wasm_lazy_compilation", - "--trace_wasm_native_heap", - "--trace_wasm_serialization", - "--trace_wasm_stack_switching", - "--trace_wasm_streaming", - "--trap_on_abort", - ], - "!verify_heap": ["--verify-heap"], - "!debug_code": ["--debug-code"], - "!disassembler": [ - "--print_all_code", "--print_code", "--print_opt_code", - "--print_code_verbose", "--print_builtin_code", "--print_regexp_code" - ], - "!slow_dchecks": ["--enable-slow-asserts"], - "!gdbjit": ["--gdbjit", "--gdbjit_full", "--gdbjit_dump"], - "!maglev": ["--maglev"], - "lite_mode": ["--no-lazy-feedback-allocation", "--max-semi-space-size=*"] + - INCOMPATIBLE_FLAGS_PER_VARIANT["jitless"], - "predictable": [ - "--parallel-compile-tasks-for-eager-toplevel", - "--parallel-compile-tasks-for-lazy", "--concurrent-recompilation", - "--stress-concurrent-allocation", "--stress-concurrent-inlining" - ], - "dict_property_const_tracking": ["--stress-concurrent-inlining"], + "lite_mode": ["--no-lazy-feedback-allocation", "--max-semi-space-size=*", + "--stress-concurrent-inlining"] + + INCOMPATIBLE_FLAGS_PER_VARIANT["jitless"], + "predictable": ["--parallel-compile-tasks-for-eager-toplevel", + "--parallel-compile-tasks-for-lazy", + "--concurrent-recompilation", + "--stress-concurrent-allocation", + "--stress-concurrent-inlining"], + 
"dict_property_const_tracking": [ + "--stress-concurrent-inlining"], } # Flags that lead to a contradiction when a certain extra-flag is present. diff --git a/tools/testrunner/standard_runner_test.py b/tools/testrunner/standard_runner_test.py index 770a29bf88..65d98b65ed 100644 --- a/tools/testrunner/standard_runner_test.py +++ b/tools/testrunner/standard_runner_test.py @@ -231,16 +231,18 @@ class StandardRunnerTest(TestRunnerTest): v8_enable_sandbox=False ) ) - result.stdout_includes('>>> Autodetected:') - result.stdout_includes('asan') - result.stdout_includes('cfi_vptr') - result.stdout_includes('dcheck_always_on') - result.stdout_includes('msan') - result.stdout_includes('no_i18n') - result.stdout_includes('tsan') - result.stdout_includes('ubsan_vptr') - result.stdout_includes('webassembly') - result.stdout_includes('>>> Running tests for ia32.release') + expect_text = ( + '>>> Autodetected:\n' + 'asan\n' + 'cfi_vptr\n' + 'dcheck_always_on\n' + 'msan\n' + 'no_i18n\n' + 'tsan\n' + 'ubsan_vptr\n' + 'webassembly\n' + '>>> Running tests for ia32.release') + result.stdout_includes(expect_text) result.has_returncode(0) # TODO(machenbach): Test some more implications of the auto-detected # options, e.g. that the right env variables are set. 
diff --git a/tools/testrunner/testdata/testroot1/out/build/v8_build_config.json b/tools/testrunner/testdata/testroot1/out/build/v8_build_config.json index c4aa78f7f5..9f1743780e 100644 --- a/tools/testrunner/testdata/testroot1/out/build/v8_build_config.json +++ b/tools/testrunner/testdata/testroot1/out/build/v8_build_config.json @@ -32,13 +32,5 @@ "v8_enable_single_generation": false, "v8_enable_third_party_heap": false, "v8_enable_webassembly": true, - "v8_dict_property_const_tracking": false, - "v8_code_comments": false, - "v8_enable_debug_code": false, - "v8_enable_verify_heap": false, - "v8_enable_slow_dchecks": false, - "v8_enable_maglev": false, - "v8_enable_disassembler": false, - "is_DEBUG_defined": false, - "v8_enable_gdbjit": false + "v8_dict_property_const_tracking": false } diff --git a/tools/testrunner/testdata/testroot2/out/build/v8_build_config.json b/tools/testrunner/testdata/testroot2/out/build/v8_build_config.json index 3ad5534b05..a0b2cb87b4 100644 --- a/tools/testrunner/testdata/testroot2/out/build/v8_build_config.json +++ b/tools/testrunner/testdata/testroot2/out/build/v8_build_config.json @@ -32,13 +32,5 @@ "v8_enable_single_generation": false, "v8_enable_third_party_heap": false, "v8_enable_webassembly": true, - "v8_dict_property_const_tracking": false, - "v8_code_comments": false, - "v8_enable_debug_code": false, - "v8_enable_verify_heap": false, - "v8_enable_slow_dchecks": false, - "v8_enable_maglev": false, - "v8_enable_disassembler": false, - "is_DEBUG_defined": false, - "v8_enable_gdbjit": false + "v8_dict_property_const_tracking": false } diff --git a/tools/testrunner/testdata/testroot3/out/build/v8_build_config.json b/tools/testrunner/testdata/testroot3/out/build/v8_build_config.json index c4aa78f7f5..9f1743780e 100644 --- a/tools/testrunner/testdata/testroot3/out/build/v8_build_config.json +++ b/tools/testrunner/testdata/testroot3/out/build/v8_build_config.json @@ -32,13 +32,5 @@ "v8_enable_single_generation": false, 
"v8_enable_third_party_heap": false, "v8_enable_webassembly": true, - "v8_dict_property_const_tracking": false, - "v8_code_comments": false, - "v8_enable_debug_code": false, - "v8_enable_verify_heap": false, - "v8_enable_slow_dchecks": false, - "v8_enable_maglev": false, - "v8_enable_disassembler": false, - "is_DEBUG_defined": false, - "v8_enable_gdbjit": false + "v8_dict_property_const_tracking": false } diff --git a/tools/testrunner/testdata/testroot5/out.gn/build/v8_build_config.json b/tools/testrunner/testdata/testroot5/out.gn/build/v8_build_config.json index c4aa78f7f5..9f1743780e 100644 --- a/tools/testrunner/testdata/testroot5/out.gn/build/v8_build_config.json +++ b/tools/testrunner/testdata/testroot5/out.gn/build/v8_build_config.json @@ -32,13 +32,5 @@ "v8_enable_single_generation": false, "v8_enable_third_party_heap": false, "v8_enable_webassembly": true, - "v8_dict_property_const_tracking": false, - "v8_code_comments": false, - "v8_enable_debug_code": false, - "v8_enable_verify_heap": false, - "v8_enable_slow_dchecks": false, - "v8_enable_maglev": false, - "v8_enable_disassembler": false, - "is_DEBUG_defined": false, - "v8_enable_gdbjit": false + "v8_dict_property_const_tracking": false } diff --git a/tools/testrunner/testdata/testroot6/out/build/v8_build_config.json b/tools/testrunner/testdata/testroot6/out/build/v8_build_config.json index c4aa78f7f5..9f1743780e 100644 --- a/tools/testrunner/testdata/testroot6/out/build/v8_build_config.json +++ b/tools/testrunner/testdata/testroot6/out/build/v8_build_config.json @@ -32,13 +32,5 @@ "v8_enable_single_generation": false, "v8_enable_third_party_heap": false, "v8_enable_webassembly": true, - "v8_dict_property_const_tracking": false, - "v8_code_comments": false, - "v8_enable_debug_code": false, - "v8_enable_verify_heap": false, - "v8_enable_slow_dchecks": false, - "v8_enable_maglev": false, - "v8_enable_disassembler": false, - "is_DEBUG_defined": false, - "v8_enable_gdbjit": false + 
"v8_dict_property_const_tracking": false } From 521a399d357660424a7d234914c3fcb8457697f3 Mon Sep 17 00:00:00 2001 From: Darius M Date: Mon, 9 Jan 2023 08:44:28 +0100 Subject: [PATCH 183/654] [maglev] Temporarily disable in-heap Typed Array support Commit https://chromium.googlesource.com/v8/v8/+/5d3e12941e3787a5d6ce0eaeb0bc7503a331791c introduced support for in-heap Typed Arrays in Maglev. This is causing a bug in the register allocator, that is taking me a while to fix. I'm thus temporarily disabled this in-heap Typed Array support until I've fixed the register allocator bug. Fixed: v8:13639 Bug: v8:7700 Change-Id: Ic121bafcd22e248a5a340baec7d10a265a5a711a Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4146422 Reviewed-by: Victor Gomes Auto-Submit: Darius Mercadier Commit-Queue: Victor Gomes Cr-Commit-Position: refs/heads/main@{#85134} --- src/maglev/maglev-graph-builder.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/maglev/maglev-graph-builder.cc b/src/maglev/maglev-graph-builder.cc index 86a462913f..d313cae0f5 100644 --- a/src/maglev/maglev-graph-builder.cc +++ b/src/maglev/maglev-graph-builder.cc @@ -2213,6 +2213,10 @@ bool MaglevGraphBuilder::TryBuildElementAccess( // TODO(victorgomes): Support more elements kind. ElementsKind elements_kind = access_info.elements_kind(); if (IsTypedArrayElementsKind(elements_kind)) { + if (JSTypedArray::kMaxSizeInHeap != 0) { + // TODO(dmercadier): re-enable support for in-heap Typed Arrays. + return false; + } if (elements_kind == BIGUINT64_ELEMENTS || elements_kind == BIGINT64_ELEMENTS) { return false; From 0ab8a7a111d3ba4a3b22dbc4b6fb472eddeaead9 Mon Sep 17 00:00:00 2001 From: Manos Koukoutos Date: Mon, 2 Jan 2023 13:01:40 +0100 Subject: [PATCH 184/654] [wasm-gc][liftoff] Optimize final-type checks This is a follow-up to crrev.com/c/v8/v8/+/4096478. Similarly to Turbofan, we reduce type-checks for final types in Liftoff to type identity. 
Bug: v8:7748 Change-Id: I095880a7718bd2d675dd119f1f14869c97d641b7 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4128522 Reviewed-by: Matthias Liedtke Commit-Queue: Manos Koukoutos Cr-Commit-Position: refs/heads/main@{#85135} --- src/wasm/baseline/liftoff-compiler.cc | 76 +++++++++++++++------------ 1 file changed, 41 insertions(+), 35 deletions(-) diff --git a/src/wasm/baseline/liftoff-compiler.cc b/src/wasm/baseline/liftoff-compiler.cc index 970fa7fe2f..be819849c0 100644 --- a/src/wasm/baseline/liftoff-compiler.cc +++ b/src/wasm/baseline/liftoff-compiler.cc @@ -5943,44 +5943,50 @@ class LiftoffCompiler { __ LoadMap(tmp1, obj_reg); // {tmp1} now holds the object's map. - // Check for rtt equality, and if not, check if the rtt is a struct/array - // rtt. - __ emit_cond_jump(kEqual, &match, rtt_type.kind(), tmp1, rtt_reg, frozen); + if (module->types[rtt_type.ref_index()].is_final) { + // In this case, simply check for map equality. + __ emit_cond_jump(kUnequal, no_match, rtt_type.kind(), tmp1, rtt_reg, + frozen); + } else { + // Check for rtt equality, and if not, check if the rtt is a struct/array + // rtt. + __ emit_cond_jump(kEqual, &match, rtt_type.kind(), tmp1, rtt_reg, frozen); - if (is_cast_from_any) { - // Check for map being a map for a wasm object (struct, array, func). - __ Load(LiftoffRegister(scratch2), tmp1, no_reg, - wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset), - LoadType::kI32Load16U); - __ emit_i32_subi(scratch2, scratch2, FIRST_WASM_OBJECT_TYPE); - __ emit_i32_cond_jumpi(kUnsignedGreaterThan, no_match, scratch2, - LAST_WASM_OBJECT_TYPE - FIRST_WASM_OBJECT_TYPE, - frozen); - } + if (is_cast_from_any) { + // Check for map being a map for a wasm object (struct, array, func). 
+ __ Load(LiftoffRegister(scratch2), tmp1, no_reg, + wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset), + LoadType::kI32Load16U); + __ emit_i32_subi(scratch2, scratch2, FIRST_WASM_OBJECT_TYPE); + __ emit_i32_cond_jumpi(kUnsignedGreaterThan, no_match, scratch2, + LAST_WASM_OBJECT_TYPE - FIRST_WASM_OBJECT_TYPE, + frozen); + } - // Constant-time subtyping check: load exactly one candidate RTT from the - // supertypes list. - // Step 1: load the WasmTypeInfo into {tmp1}. - constexpr int kTypeInfoOffset = wasm::ObjectAccess::ToTagged( - Map::kConstructorOrBackPointerOrNativeContextOffset); - __ LoadTaggedPointer(tmp1, tmp1, no_reg, kTypeInfoOffset); - // Step 2: check the list's length if needed. - uint32_t rtt_depth = GetSubtypingDepth(module, rtt_type.ref_index()); - if (rtt_depth >= kMinimumSupertypeArraySize) { - LiftoffRegister list_length(scratch2); - int offset = - ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesLengthOffset); - __ LoadSmiAsInt32(list_length, tmp1, offset); - __ emit_i32_cond_jumpi(kUnsignedLessEqual, no_match, list_length.gp(), - rtt_depth, frozen); + // Constant-time subtyping check: load exactly one candidate RTT from the + // supertypes list. + // Step 1: load the WasmTypeInfo into {tmp1}. + constexpr int kTypeInfoOffset = wasm::ObjectAccess::ToTagged( + Map::kConstructorOrBackPointerOrNativeContextOffset); + __ LoadTaggedPointer(tmp1, tmp1, no_reg, kTypeInfoOffset); + // Step 2: check the list's length if needed. + uint32_t rtt_depth = GetSubtypingDepth(module, rtt_type.ref_index()); + if (rtt_depth >= kMinimumSupertypeArraySize) { + LiftoffRegister list_length(scratch2); + int offset = + ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesLengthOffset); + __ LoadSmiAsInt32(list_length, tmp1, offset); + __ emit_i32_cond_jumpi(kUnsignedLessEqual, no_match, list_length.gp(), + rtt_depth, frozen); + } + // Step 3: load the candidate list slot into {tmp1}, and compare it. 
+ __ LoadTaggedPointer( + tmp1, tmp1, no_reg, + ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset + + rtt_depth * kTaggedSize)); + __ emit_cond_jump(kUnequal, no_match, rtt_type.kind(), tmp1, rtt_reg, + frozen); } - // Step 3: load the candidate list slot into {tmp1}, and compare it. - __ LoadTaggedPointer( - tmp1, tmp1, no_reg, - ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset + - rtt_depth * kTaggedSize)); - __ emit_cond_jump(kUnequal, no_match, rtt_type.kind(), tmp1, rtt_reg, - frozen); // Fall through to {match}. __ bind(&match); From 3172b30fe49a35155790755e29a8f55f69f9eba8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Inf=C3=BChr?= Date: Thu, 22 Dec 2022 15:27:49 +0100 Subject: [PATCH 185/654] [execution, heap] Lock global safepoint mutex in Isolate::Deinit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This CL locks the global safepoint mutex during Isolate::Deinit when the shared heap is used. This prevents any shared GC between starting isolate tear down and detaching from the shared heap isolate. Not doing that resulted in deadlocks when the isolate's main thread was blocking until background tasks finished while still being in the running state. It also solves the heap verification failures when one client isolate stopped right before detaching from the shared heap isolate for a shared GC. In this case the external string table was already finalized. This CL ensures that there is no GC in-between these two operations anymore. 
Bug: v8:13267, chromium:1401078 Change-Id: I131bcf1506eb8d756e0092139b638fae051b902d Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4120442 Reviewed-by: Michael Lippautz Commit-Queue: Dominik Inführ Cr-Commit-Position: refs/heads/main@{#85136} --- src/execution/isolate.cc | 35 +++++++++++++++++++---------------- src/heap/safepoint.cc | 25 +++++++++++++------------ 2 files changed, 32 insertions(+), 28 deletions(-) diff --git a/src/execution/isolate.cc b/src/execution/isolate.cc index bc0e4bfb45..de0960783e 100644 --- a/src/execution/isolate.cc +++ b/src/execution/isolate.cc @@ -3554,7 +3554,20 @@ void Isolate::UpdateLogObjectRelocation() { void Isolate::Deinit() { TRACE_ISOLATE(deinit); - DisallowGarbageCollection disallow_gc; + + // All client isolates should already be detached when the shared heap isolate + // tears down. + if (is_shared_heap_isolate()) { + global_safepoint()->AssertNoClientsOnTearDown(); + } + + if (has_shared_heap()) { + IgnoreLocalGCRequests ignore_gc_requests(heap()); + ParkedScope parked_scope(main_thread_local_heap()); + shared_heap_isolate()->global_safepoint()->clients_mutex_.Lock(); + } + + DisallowGarbageCollection no_gc; tracing_cpu_profiler_.reset(); if (v8_flags.stress_sampling_allocation_profiler > 0) { @@ -3590,11 +3603,6 @@ void Isolate::Deinit() { optimizing_compile_dispatcher_ = nullptr; } - // All client isolates should already be detached. - if (is_shared()) { - global_safepoint()->AssertNoClientsOnTearDown(); - } - if (v8_flags.print_deopt_stress) { PrintF(stdout, "=== Stress deopt counter: %u\n", stress_deopt_count_); } @@ -3631,20 +3639,15 @@ void Isolate::Deinit() { // At this point there are no more background threads left in this isolate. heap_.safepoint()->AssertMainThreadIsOnlyThread(); - // Tear down data using the shared heap before detaching. + // Tear down data that requires the shared heap before detaching. 
heap_.TearDownWithSharedHeap(); - { - // This isolate might have to park for a shared GC initiated by another - // client isolate before it can actually detach from the shared isolate. - AllowGarbageCollection allow_shared_gc; + // Detach from the shared heap isolate and then unlock the mutex. + if (has_shared_heap()) { + Isolate* shared_heap_isolate = this->shared_heap_isolate(); DetachFromSharedIsolate(); DetachFromSharedSpaceIsolate(); - } - - // All client isolates should already be detached. - if (is_shared_space_isolate()) { - global_safepoint()->AssertNoClientsOnTearDown(); + shared_heap_isolate->global_safepoint()->clients_mutex_.Unlock(); } // Since there are no other threads left, we can lock this mutex without any diff --git a/src/heap/safepoint.cc b/src/heap/safepoint.cc index 87a40837d0..12b0882b43 100644 --- a/src/heap/safepoint.cc +++ b/src/heap/safepoint.cc @@ -312,12 +312,7 @@ void GlobalSafepoint::AppendClient(Isolate* client) { void GlobalSafepoint::RemoveClient(Isolate* client) { DCHECK_EQ(client->heap()->gc_state(), Heap::TEAR_DOWN); - - // A shared heap may have already acquired the client mutex to perform a - // shared GC. We need to park the Isolate here to allow for a shared GC. - IgnoreLocalGCRequests ignore_gc_requests(client->heap()); - ParkedRecursiveMutexGuard guard(client->main_thread_local_heap(), - &clients_mutex_); + AssertActive(); if (client->global_safepoint_next_client_isolate_) { client->global_safepoint_next_client_isolate_ @@ -338,12 +333,18 @@ void GlobalSafepoint::RemoveClient(Isolate* client) { } void GlobalSafepoint::AssertNoClientsOnTearDown() { - DCHECK_WITH_MSG( - clients_head_ == nullptr, - "Shared heap must not have clients at teardown. The first isolate that " - "is created (in a process that has no isolates) owns the lifetime of the " - "shared heap and is considered the main isolate. 
The main isolate must " - "outlive all other isolates."); + if (v8_flags.shared_space) { + DCHECK_EQ(clients_head_, shared_heap_isolate_); + DCHECK_NULL(shared_heap_isolate_->global_safepoint_prev_client_isolate_); + DCHECK_NULL(shared_heap_isolate_->global_safepoint_next_client_isolate_); + } else { + DCHECK_WITH_MSG( + clients_head_ == nullptr, + "Shared heap must not have clients at teardown. The first isolate that " + "is created (in a process that has no isolates) owns the lifetime of " + "the shared heap and is considered the main isolate. The main isolate " + "must outlive all other isolates."); + } } void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) { From 302892032ca54b236703d0f4f5a499a0e95beced Mon Sep 17 00:00:00 2001 From: Jakob Linke Date: Thu, 5 Jan 2023 09:42:13 +0100 Subject: [PATCH 186/654] [factory] Remove dead code in NewOffHeapTrampolineFor MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit .. and restructure a bit. The V8_EXTERNAL_CODE_SPACE case is fully handled in the initial code section (thus dead code further down can be removed). Also, no need to guard both through an #ifdef and an `if`. 
Change-Id: Ibc56bc5922908e7a73f26a2799ac29287336cb3a Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4136721 Auto-Submit: Jakob Linke Reviewed-by: Dominik Inführ Commit-Queue: Dominik Inführ Cr-Commit-Position: refs/heads/main@{#85137} --- src/heap/factory.cc | 41 ++++++++++++++--------------------------- 1 file changed, 14 insertions(+), 27 deletions(-) diff --git a/src/heap/factory.cc b/src/heap/factory.cc index c89fb56ba0..4e8a64d155 100644 --- a/src/heap/factory.cc +++ b/src/heap/factory.cc @@ -2504,23 +2504,20 @@ Handle Factory::NewOffHeapTrampolineFor(Handle code, CHECK(Builtins::IsIsolateIndependentBuiltin(*code)); #ifdef V8_EXTERNAL_CODE_SPACE - if (V8_EXTERNAL_CODE_SPACE_BOOL) { - const int no_flags = 0; - Handle code_data_container = - NewCodeDataContainer(no_flags, AllocationType::kOld); - - const bool set_is_off_heap_trampoline = true; - code_data_container->initialize_flags(code->kind(), code->builtin_id(), - code->is_turbofanned(), - set_is_off_heap_trampoline); - code_data_container->set_kind_specific_flags( - code->kind_specific_flags(kRelaxedLoad), kRelaxedStore); - code_data_container->set_code_entry_point(isolate(), - code->code_entry_point()); - return Handle::cast(code_data_container); - } -#endif // V8_EXTERNAL_CODE_SPACE + const int no_flags = 0; + Handle code_data_container = + NewCodeDataContainer(no_flags, AllocationType::kOld); + const bool set_is_off_heap_trampoline = true; + code_data_container->initialize_flags(code->kind(), code->builtin_id(), + code->is_turbofanned(), + set_is_off_heap_trampoline); + code_data_container->set_kind_specific_flags( + code->kind_specific_flags(kRelaxedLoad), kRelaxedStore); + code_data_container->set_code_entry_point(isolate(), + code->code_entry_point()); + return Handle::cast(code_data_container); +#else bool generate_jump_to_instruction_stream = Builtins::CodeObjectIsExecutable(code->builtin_id()); Handle result = Builtins::GenerateOffHeapTrampolineFor( @@ -2569,20 +2566,10 @@ Handle 
Factory::NewOffHeapTrampolineFor(Handle code, } #endif raw_result.set_relocation_info(canonical_reloc_info); - if (V8_EXTERNAL_CODE_SPACE_BOOL) { - CodeDataContainer code_data_container = - raw_result.code_data_container(kAcquireLoad); - // Updating flags (in particular is_off_heap_trampoline one) might change - // the value of the instruction start, so update it here. - code_data_container.UpdateCodeEntryPoint(isolate(), raw_result); - // Also update flag values cached on the code data container. - code_data_container.initialize_flags( - raw_code.kind(), raw_code.builtin_id(), raw_code.is_turbofanned(), - set_is_off_heap_trampoline); - } } return ToCodeT(result, isolate()); +#endif // V8_EXTERNAL_CODE_SPACE } Handle Factory::CopyBytecodeArray(Handle source) { From 2f4f3f982948b7c194b5338e0da2a4524bac0657 Mon Sep 17 00:00:00 2001 From: Michael Achenbach Date: Thu, 5 Jan 2023 17:22:02 +0100 Subject: [PATCH 187/654] [gcmole] Make gcmole test more robust to unrelated code changes. Bug: v8:13637 Change-Id: I90362d4819151465b9e476441cd662c01dd4a50a Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4138267 Auto-Submit: Michael Achenbach Reviewed-by: Leszek Swirski Commit-Queue: Leszek Swirski Cr-Commit-Position: refs/heads/main@{#85138} --- tools/gcmole/gcmole.py | 12 +++++++++++- tools/gcmole/test-expectations.txt | 2 +- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/tools/gcmole/gcmole.py b/tools/gcmole/gcmole.py index a6e87fc8e6..c165c135fd 100755 --- a/tools/gcmole/gcmole.py +++ b/tools/gcmole/gcmole.py @@ -505,6 +505,16 @@ def check_correctness_for_arch(files, options): return errors_found +def clean_test_output(output): + """Substitute line number patterns for files except gcmole-test.cc, as + otherwise unrelated code changes require a rebaseline of test expectations. 
+ """ + return re.sub( + r'(?::', + output) + + def has_unexpected_errors(options, errors_found, file_io): """Returns True if error state isn't as expected, False otherwise. @@ -515,7 +525,7 @@ def has_unexpected_errors(options, errors_found, file_io): return errors_found log("Test Run") - output = file_io.getvalue() + output = clean_test_output(file_io.getvalue()) if not errors_found: log("Test file should produce errors, but none were found. Output:") print(output) diff --git a/tools/gcmole/test-expectations.txt b/tools/gcmole/test-expectations.txt index fa3e4b9e28..4b92d78ee6 100644 --- a/tools/gcmole/test-expectations.txt +++ b/tools/gcmole/test-expectations.txt @@ -4,7 +4,7 @@ tools/gcmole/gcmole-test.cc:30:10: warning: Possibly stale variable due to GCs. tools/gcmole/gcmole-test.cc:28:20: note: Call might cause unexpected GC. isolate->heap()->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting); ^ -./src/heap/heap.h:988:21: note: GC call here. +./src/heap/heap.h::: note: GC call here. V8_EXPORT_PRIVATE bool CollectGarbage( ^ tools/gcmole/gcmole-test.cc:48:3: warning: Possible problem with evaluation order with interleaved GCs. 
From 027afd42734c43270a8bf58751769d4b88e19255 Mon Sep 17 00:00:00 2001 From: pthier Date: Thu, 5 Jan 2023 17:01:09 +0100 Subject: [PATCH 188/654] [maglev][arm64] Port CheckJSObjectElementsBounds Bug: v8:7700 Change-Id: I235b0991ea813333737594096f228c980cc5af4e Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4138266 Auto-Submit: Patrick Thier Commit-Queue: Victor Gomes Reviewed-by: Victor Gomes Cr-Commit-Position: refs/heads/main@{#85139} --- src/maglev/arm64/maglev-ir-arm64.cc | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index 91dc962f8f..9e80891d7e 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -238,7 +238,33 @@ void AssertInt32::GenerateCode(MaglevAssembler* masm, __ Check(ToCondition(condition_), reason_); } -UNIMPLEMENTED_NODE(CheckJSObjectElementsBounds) +void CheckJSObjectElementsBounds::SetValueLocationConstraints() { + UseRegister(receiver_input()); + set_temporaries_needed(1); + UseRegister(index_input()); +} +void CheckJSObjectElementsBounds::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register object = ToRegister(receiver_input()); + Register index = ToRegister(index_input()).W(); + Register scratch = general_temporaries().PopFirst(); + __ AssertNotSmi(object); + + if (v8_flags.debug_code) { + __ CompareObjectType(object, scratch, scratch, FIRST_JS_OBJECT_TYPE); + __ Assert(ge, AbortReason::kUnexpectedValue); + } + __ LoadAnyTaggedField(scratch, + FieldMemOperand(object, JSObject::kElementsOffset)); + if (v8_flags.debug_code) { + __ AssertNotSmi(scratch); + } + __ SmiUntagField(scratch, + FieldMemOperand(scratch, FixedArray::kLengthOffset)); + __ Cmp(index, scratch.W()); + __ EmitEagerDeoptIf(hs, DeoptimizeReason::kOutOfBounds, this); +} + UNIMPLEMENTED_NODE_WITH_CALL(JumpLoopPrologue, loop_depth_, unit_) int 
BuiltinStringFromCharCode::MaxCallStackArgs() const { From 7b9fa44c9824a738239897e5c9dbb299c9e40d8c Mon Sep 17 00:00:00 2001 From: Victor Gomes Date: Thu, 5 Jan 2023 15:26:40 +0100 Subject: [PATCH 189/654] [maglev] Fix ProtoApply with spread call By propagating the call arguments mode. Fixed: chromium:1405092 Bug: v8:7700 Change-Id: I6da52fedea1d5a0083d328fdbf39708f956b97cf Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4138261 Reviewed-by: Toon Verwaest Commit-Queue: Toon Verwaest Auto-Submit: Victor Gomes Cr-Commit-Position: refs/heads/main@{#85140} --- src/maglev/maglev-graph-builder.cc | 4 ++-- test/mjsunit/maglev/regress-1405092.js | 29 ++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 2 deletions(-) create mode 100644 test/mjsunit/maglev/regress-1405092.js diff --git a/src/maglev/maglev-graph-builder.cc b/src/maglev/maglev-graph-builder.cc index d313cae0f5..69c301596d 100644 --- a/src/maglev/maglev-graph-builder.cc +++ b/src/maglev/maglev-graph-builder.cc @@ -3362,8 +3362,8 @@ ValueNode* MaglevGraphBuilder::ReduceFunctionPrototypeApplyCallWithReceiver( } else if (args.count() == 1 || IsNullValue(args[1]) || IsUndefinedValue(args[1])) { // No need for spread. We have only the new receiver. - CallArguments new_args(ConvertReceiverMode::kAny, - {GetTaggedValue(args[0])}); + CallArguments new_args(ConvertReceiverMode::kAny, {GetTaggedValue(args[0])}, + args.mode()); call = ReduceCall(receiver, new_args, feedback_source, speculation_mode); } else { // FunctionPrototypeApply only consider two arguments: the new receiver and diff --git a/test/mjsunit/maglev/regress-1405092.js b/test/mjsunit/maglev/regress-1405092.js new file mode 100644 index 0000000000..81480c9cb0 --- /dev/null +++ b/test/mjsunit/maglev/regress-1405092.js @@ -0,0 +1,29 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +// Flags: --allow-natives-syntax --maglev + +'use strict'; + +function foo(obj, ...args) { + obj['apply'](...args); +} + +var x = 0; + +function bar() { + try { + this.x; + } catch (e) { + x++; + } +} + +%PrepareFunctionForOptimization(foo); +foo(bar); + +%OptimizeMaglevOnNextCall(foo); +foo(bar); + +assertEquals(2, x); From 82e8025d6918be8ba239308358620dbf9ea20ea1 Mon Sep 17 00:00:00 2001 From: Jakob Linke Date: Thu, 5 Jan 2023 10:13:09 +0100 Subject: [PATCH 190/654] [builtins] Remove read-only CodeDataContainer optimization MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since only applies to builds without v8_enable_external_code_space and only saves minimal snapshot size it doesn't seem worth keeping around. Bug: v8:7464 Change-Id: I81b520235c6174abc340cb74825e6cc86b2b8958 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4136722 Reviewed-by: Dominik Inführ Commit-Queue: Jakob Linke Cr-Commit-Position: refs/heads/main@{#85141} --- src/builtins/builtins.cc | 2 -- src/heap/factory.cc | 49 +++++++++++++--------------------------- src/heap/factory.h | 10 -------- 3 files changed, 16 insertions(+), 45 deletions(-) diff --git a/src/builtins/builtins.cc b/src/builtins/builtins.cc index fcf9e8f1f0..cd5b68b694 100644 --- a/src/builtins/builtins.cc +++ b/src/builtins/builtins.cc @@ -412,7 +412,6 @@ Handle Builtins::GenerateOffHeapTrampolineFor( return Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN) .set_kind_specific_flags(kind_specific_flags) - .set_read_only_data_container(!V8_EXTERNAL_CODE_SPACE_BOOL) .set_self_reference(generator.CodeObject()) .set_is_executable(generate_jump_to_instruction_stream) .Build(); @@ -478,7 +477,6 @@ Handle Builtins::CreateInterpreterEntryTrampolineForProfiling( return Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN) .set_kind_specific_flags(kind_specific_flags) - .set_read_only_data_container(false) // Mimic the InterpreterEntryTrampoline. 
.set_builtin(Builtin::kInterpreterEntryTrampoline) .set_is_executable(true) diff --git a/src/heap/factory.cc b/src/heap/factory.cc index 4e8a64d155..6f5ed04886 100644 --- a/src/heap/factory.cc +++ b/src/heap/factory.cc @@ -109,40 +109,26 @@ MaybeHandle Factory::CodeBuilder::BuildInternal( ? local_isolate_->factory()->NewByteArray(code_desc_.reloc_size, AllocationType::kOld) : factory->NewByteArray(code_desc_.reloc_size, AllocationType::kOld); - Handle data_container; - // Use a canonical off-heap trampoline CodeDataContainer if possible. - const int32_t promise_rejection_flag = - Code::IsPromiseRejectionField::encode(true); - if (read_only_data_container_ && - (kind_specific_flags_ == 0 || - kind_specific_flags_ == promise_rejection_flag)) { - const ReadOnlyRoots roots(isolate_); - const auto canonical_code_data_container = Handle::cast( - kind_specific_flags_ == 0 - ? roots.trampoline_trivial_code_data_container_handle() - : roots.trampoline_promise_rejection_code_data_container_handle()); - DCHECK_EQ(canonical_code_data_container->kind_specific_flags(kRelaxedLoad), - kind_specific_flags_); - data_container = canonical_code_data_container; + Handle data_container; + if (CompiledWithConcurrentBaseline()) { + data_container = local_isolate_->factory()->NewCodeDataContainer( + 0, AllocationType::kOld); } else { - if (CompiledWithConcurrentBaseline()) { - data_container = local_isolate_->factory()->NewCodeDataContainer( - 0, AllocationType::kOld); - } else { - data_container = factory->NewCodeDataContainer( - 0, read_only_data_container_ ? AllocationType::kReadOnly - : AllocationType::kOld); - } - if (V8_EXTERNAL_CODE_SPACE_BOOL) { - const bool set_is_off_heap_trampoline = read_only_data_container_; - data_container->initialize_flags(kind_, builtin_, is_turbofanned_, - set_is_off_heap_trampoline); - } - data_container->set_kind_specific_flags(kind_specific_flags_, - kRelaxedStore); + AllocationType allocation_type = + V8_EXTERNAL_CODE_SPACE_BOOL || is_executable_ + ? 
AllocationType::kOld + : AllocationType::kReadOnly; + data_container = factory->NewCodeDataContainer(0, allocation_type); } + if (V8_EXTERNAL_CODE_SPACE_BOOL) { + static constexpr bool kIsNotOffHeapTrampoline = false; + data_container->initialize_flags(kind_, builtin_, is_turbofanned_, + kIsNotOffHeapTrampoline); + } + data_container->set_kind_specific_flags(kind_specific_flags_, kRelaxedStore); + // Basic block profiling data for builtins is stored in the JS heap rather // than in separately-allocated C++ objects. Allocate that data now if // appropriate. @@ -2529,9 +2515,6 @@ Handle Factory::NewOffHeapTrampolineFor(Handle code, // stored on the Code object, refer to the off-heap metadata area. CHECK_EQ(result->raw_metadata_size(), 0); - // The CodeDataContainer should not be modified beyond this point since it's - // now possibly canonicalized. - // The trampoline code object must inherit specific flags from the original // builtin (e.g. the safepoint-table offset). We set them manually here. { diff --git a/src/heap/factory.h b/src/heap/factory.h index 8733b8051c..bb580de65d 100644 --- a/src/heap/factory.h +++ b/src/heap/factory.h @@ -1011,15 +1011,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase { return *this; } - // Indicates the CodeDataContainer should be allocated in read-only space. - // As an optimization, if the kind-specific flags match that of a canonical - // container, it will be used instead. 
- CodeBuilder& set_read_only_data_container(bool read_only) { - CHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !read_only); - read_only_data_container_ = read_only; - return *this; - } - CodeBuilder& set_kind_specific_flags(int32_t flags) { kind_specific_flags_ = flags; return *this; @@ -1061,7 +1052,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase { Handle interpreter_data_; BasicBlockProfilerData* profiler_data_ = nullptr; bool is_executable_ = true; - bool read_only_data_container_ = false; bool is_turbofanned_ = false; int stack_slots_ = 0; }; From 322e42bf136cdfda89175c5d4b82778114568eae Mon Sep 17 00:00:00 2001 From: Michael Lippautz Date: Wed, 4 Jan 2023 18:33:59 +0100 Subject: [PATCH 191/654] [heap] Remove Push/Pop for EmbedderHeapTracer wrapper object EmbedderHeapTracer has been removed, making the separate main-thread worklist obsolete. Bug: v8:13207 Change-Id: I3f92457a73d6664b28646247548b78ade491be32 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4136716 Reviewed-by: Anton Bikineev Auto-Submit: Michael Lippautz Commit-Queue: Anton Bikineev Cr-Commit-Position: refs/heads/main@{#85142} --- src/heap/marking-visitor-inl.h | 11 ++--------- src/heap/marking-worklist-inl.h | 11 ----------- src/heap/marking-worklist.cc | 9 +-------- src/heap/marking-worklist.h | 10 ---------- test/unittests/heap/marking-worklist-unittest.cc | 11 ----------- 5 files changed, 3 insertions(+), 49 deletions(-) diff --git a/src/heap/marking-visitor-inl.h b/src/heap/marking-visitor-inl.h index 6c9f0f6f83..9c8a9c5b6a 100644 --- a/src/heap/marking-visitor-inl.h +++ b/src/heap/marking-visitor-inl.h @@ -301,15 +301,8 @@ inline int MarkingVisitorBase:: requires_snapshot && local_marking_worklists_->ExtractWrapper(map, object, wrapper_snapshot); const int size = concrete_visitor()->VisitJSObjectSubclass(map, object); - if (size) { - if (valid_snapshot) { - // Success: The object needs to be processed for embedder references. 
- local_marking_worklists_->PushExtractedWrapper(wrapper_snapshot); - } else if (!requires_snapshot) { - // Snapshot not supported. Just fall back to pushing the wrapper itself - // instead which will be processed on the main thread. - local_marking_worklists_->PushWrapper(object); - } + if (size && valid_snapshot) { + local_marking_worklists_->PushExtractedWrapper(wrapper_snapshot); } return size; } diff --git a/src/heap/marking-worklist-inl.h b/src/heap/marking-worklist-inl.h index 6024a3e46a..b34f83ec50 100644 --- a/src/heap/marking-worklist-inl.h +++ b/src/heap/marking-worklist-inl.h @@ -18,7 +18,6 @@ template void MarkingWorklists::Update(Callback callback) { shared_.Update(callback); on_hold_.Update(callback); - wrapper_.Update(callback); other_.Update(callback); for (auto& cw : context_worklists_) { cw.worklist->Update(callback); @@ -59,16 +58,6 @@ void MarkingWorklists::Local::PushExtractedWrapper( cpp_marking_state_->MarkAndPush(snapshot); } -void MarkingWorklists::Local::PushWrapper(HeapObject object) { - DCHECK_NULL(cpp_marking_state_); - wrapper_.Push(object); -} - -bool MarkingWorklists::Local::PopWrapper(HeapObject* object) { - DCHECK_NULL(cpp_marking_state_); - return wrapper_.Pop(object); -} - Address MarkingWorklists::Local::SwitchToContext(Address context) { if (context == active_context_) return context; return SwitchToContextSlow(context); diff --git a/src/heap/marking-worklist.cc b/src/heap/marking-worklist.cc index d05540e2f8..a035155b4a 100644 --- a/src/heap/marking-worklist.cc +++ b/src/heap/marking-worklist.cc @@ -24,7 +24,6 @@ namespace internal { void MarkingWorklists::Clear() { shared_.Clear(); on_hold_.Clear(); - wrapper_.Clear(); other_.Clear(); for (auto& cw : context_worklists_) { cw.worklist->Clear(); @@ -107,7 +106,6 @@ MarkingWorklists::Local::Local( : active_(&shared_), shared_(*global->shared()), on_hold_(*global->on_hold()), - wrapper_(*global->wrapper()), active_context_(kSharedContext), 
is_per_context_mode_(!global->context_worklists().empty()), worklist_by_context_( @@ -118,7 +116,6 @@ MarkingWorklists::Local::Local( void MarkingWorklists::Local::Publish() { shared_.Publish(); on_hold_.Publish(); - wrapper_.Publish(); other_.Publish(); if (is_per_context_mode_) { for (auto& cw : worklist_by_context_) { @@ -153,11 +150,7 @@ bool MarkingWorklists::Local::IsEmpty() { } bool MarkingWorklists::Local::IsWrapperEmpty() const { - if (cpp_marking_state_) { - DCHECK(wrapper_.IsLocalAndGlobalEmpty()); - return cpp_marking_state_->IsLocalEmpty(); - } - return wrapper_.IsLocalAndGlobalEmpty(); + return !cpp_marking_state_ || cpp_marking_state_->IsLocalEmpty(); } void MarkingWorklists::Local::ShareWork() { diff --git a/src/heap/marking-worklist.h b/src/heap/marking-worklist.h index b2c6fc297e..9a37372c21 100644 --- a/src/heap/marking-worklist.h +++ b/src/heap/marking-worklist.h @@ -25,7 +25,6 @@ class JSObject; const int kMainThreadTask = 0; using MarkingWorklist = ::heap::base::Worklist; -using WrapperTracingWorklist = ::heap::base::Worklist; // We piggyback on marking to compute object sizes per native context that is // needed for the new memory measurement API. The algorithm works as follows: @@ -91,7 +90,6 @@ class V8_EXPORT_PRIVATE MarkingWorklists final { MarkingWorklist* shared() { return &shared_; } MarkingWorklist* on_hold() { return &on_hold_; } MarkingWorklist* other() { return &other_; } - WrapperTracingWorklist* wrapper() { return &wrapper_; } // A list of (context, worklist) pairs that was set up at the start of // marking by CreateContextWorklists. @@ -122,11 +120,6 @@ class V8_EXPORT_PRIVATE MarkingWorklists final { // for freshly allocatd objects. MarkingWorklist on_hold_; - // Worklist for objects that potentially require embedder tracing, i.e., - // these objects need to be handed over to the embedder to find the full - // transitive closure. - WrapperTracingWorklist wrapper_; - // Per-context worklists. 
Objects are in the `shared_` worklist by default. std::vector context_worklists_; // Worklist used for objects that are attributed to contexts that are @@ -166,8 +159,6 @@ class V8_EXPORT_PRIVATE MarkingWorklists::Local final { WrapperSnapshot& snapshot); inline void PushExtractedWrapper(const WrapperSnapshot& snapshot); inline bool SupportsExtractWrapper(); - inline void PushWrapper(HeapObject object); - inline bool PopWrapper(HeapObject* object); void Publish(); bool IsEmpty(); @@ -205,7 +196,6 @@ class V8_EXPORT_PRIVATE MarkingWorklists::Local final { MarkingWorklist::Local* active_; MarkingWorklist::Local shared_; MarkingWorklist::Local on_hold_; - WrapperTracingWorklist::Local wrapper_; Address active_context_; const bool is_per_context_mode_; const std::unordered_map> diff --git a/test/unittests/heap/marking-worklist-unittest.cc b/test/unittests/heap/marking-worklist-unittest.cc index 0bd53c2893..c6eabd85b4 100644 --- a/test/unittests/heap/marking-worklist-unittest.cc +++ b/test/unittests/heap/marking-worklist-unittest.cc @@ -40,17 +40,6 @@ TEST_F(MarkingWorklistTest, PushPopOnHold) { EXPECT_EQ(popped_object, pushed_object); } -TEST_F(MarkingWorklistTest, PushPopEmbedder) { - MarkingWorklists holder; - MarkingWorklists::Local worklists(&holder); - HeapObject pushed_object = - ReadOnlyRoots(i_isolate()->heap()).undefined_value(); - worklists.PushWrapper(pushed_object); - HeapObject popped_object; - EXPECT_TRUE(worklists.PopWrapper(&popped_object)); - EXPECT_EQ(popped_object, pushed_object); -} - TEST_F(MarkingWorklistTest, MergeOnHold) { MarkingWorklists holder; MarkingWorklists::Local main_worklists(&holder); From 88eac4b870dc9becb11fdaa2e71ed60ceab44a60 Mon Sep 17 00:00:00 2001 From: Nico Hartmann Date: Mon, 9 Jan 2023 13:23:28 +0100 Subject: [PATCH 192/654] [turboshaft] Basic TypedOptimization and new DeadCodeElimination This CL introduces typed optimizations for Turboshaft, which replaces all operations that produce a constant output (and don't have side 
effects) by the corresponding constant. In addition, a new pass for eliminating dead code is introduced that cannot only remove dead operations, but also rewrite branches that are not required into GotoOps. Drive-by: Introduce -0 as a "special value" for Float32Type and Float64Type to fix a few issues where 0 and -0 have been treated as identical. Bug: v8:12783 Change-Id: Ia1450ad7a9abb5d58c7d753596ed08a33a73184f Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4110993 Reviewed-by: Darius Mercadier Commit-Queue: Nico Hartmann Cr-Commit-Position: refs/heads/main@{#85143} --- BUILD.bazel | 3 + BUILD.gn | 3 + src/compiler/pipeline.cc | 72 ++- src/compiler/turboshaft/assembler.h | 9 + .../turboshaft/branch-elimination-reducer.h | 7 +- .../dead-code-elimination-reducer.h | 461 ++++++++++++++++++ src/compiler/turboshaft/optimization-phase.h | 71 +-- src/compiler/turboshaft/sidetable.h | 2 + .../turboshaft/type-inference-reducer.h | 365 ++++++++++---- .../turboshaft/typed-optimizations-reducer.h | 90 ++++ src/compiler/turboshaft/types.cc | 79 +-- src/compiler/turboshaft/types.h | 60 ++- .../turboshaft/uniform-reducer-adapter.h | 73 +++ src/logging/runtime-call-stats.h | 4 +- src/objects/turboshaft-types.h | 5 + src/objects/turboshaft-types.tq | 11 +- test/mjsunit/turboshaft/type-inference.js | 42 +- .../mjsunit/turboshaft/typed-optimizations.js | 39 ++ .../turboshaft/turboshaft-types-unittest.cc | 92 ++-- 19 files changed, 1203 insertions(+), 285 deletions(-) create mode 100644 src/compiler/turboshaft/dead-code-elimination-reducer.h create mode 100644 src/compiler/turboshaft/typed-optimizations-reducer.h create mode 100644 src/compiler/turboshaft/uniform-reducer-adapter.h create mode 100644 test/mjsunit/turboshaft/typed-optimizations.js diff --git a/BUILD.bazel b/BUILD.bazel index b84ca43f41..c7c4a5961e 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -2898,6 +2898,7 @@ filegroup( "src/compiler/turboshaft/assembler.h", 
"src/compiler/turboshaft/assert-types-reducer.h", "src/compiler/turboshaft/branch-elimination-reducer.h", + "src/compiler/turboshaft/dead-code-elimination-reducer.h", "src/compiler/turboshaft/decompression-optimization.cc", "src/compiler/turboshaft/decompression-optimization.h", "src/compiler/turboshaft/deopt-data.h", @@ -2932,8 +2933,10 @@ filegroup( "src/compiler/turboshaft/type-inference-reducer.h", "src/compiler/turboshaft/type-parser.cc", "src/compiler/turboshaft/type-parser.h", + "src/compiler/turboshaft/typed-optimizations-reducer.h", "src/compiler/turboshaft/types.cc", "src/compiler/turboshaft/types.h", + "src/compiler/turboshaft/uniform-reducer-adapter.h", "src/compiler/turboshaft/utils.cc", "src/compiler/turboshaft/utils.h", "src/compiler/turboshaft/value-numbering-reducer.h", diff --git a/BUILD.gn b/BUILD.gn index e7b5853d0c..2b16b574ac 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -2987,6 +2987,7 @@ v8_header_set("v8_internal_headers") { "src/compiler/turboshaft/assembler.h", "src/compiler/turboshaft/assert-types-reducer.h", "src/compiler/turboshaft/branch-elimination-reducer.h", + "src/compiler/turboshaft/dead-code-elimination-reducer.h", "src/compiler/turboshaft/decompression-optimization.h", "src/compiler/turboshaft/deopt-data.h", "src/compiler/turboshaft/fast-hash.h", @@ -3009,7 +3010,9 @@ v8_header_set("v8_internal_headers") { "src/compiler/turboshaft/snapshot-table.h", "src/compiler/turboshaft/type-inference-reducer.h", "src/compiler/turboshaft/type-parser.h", + "src/compiler/turboshaft/typed-optimizations-reducer.h", "src/compiler/turboshaft/types.h", + "src/compiler/turboshaft/uniform-reducer-adapater.h", "src/compiler/turboshaft/utils.h", "src/compiler/turboshaft/value-numbering-reducer.h", "src/compiler/turboshaft/variable-reducer.h", diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc index 9512d7d8f0..6e2c44963a 100644 --- a/src/compiler/pipeline.cc +++ b/src/compiler/pipeline.cc @@ -82,10 +82,12 @@ #include 
"src/compiler/turboshaft/assembler.h" #include "src/compiler/turboshaft/assert-types-reducer.h" #include "src/compiler/turboshaft/branch-elimination-reducer.h" +#include "src/compiler/turboshaft/dead-code-elimination-reducer.h" #include "src/compiler/turboshaft/decompression-optimization.h" #include "src/compiler/turboshaft/graph-builder.h" #include "src/compiler/turboshaft/graph-visualizer.h" #include "src/compiler/turboshaft/graph.h" +#include "src/compiler/turboshaft/index.h" #include "src/compiler/turboshaft/late-escape-analysis-reducer.h" #include "src/compiler/turboshaft/machine-optimization-reducer.h" #include "src/compiler/turboshaft/memory-optimization.h" @@ -94,6 +96,7 @@ #include "src/compiler/turboshaft/select-lowering-reducer.h" #include "src/compiler/turboshaft/simplify-tf-loops.h" #include "src/compiler/turboshaft/type-inference-reducer.h" +#include "src/compiler/turboshaft/typed-optimizations-reducer.h" #include "src/compiler/turboshaft/types.h" #include "src/compiler/turboshaft/value-numbering-reducer.h" #include "src/compiler/turboshaft/variable-reducer.h" @@ -111,6 +114,7 @@ #include "src/logging/code-events.h" #include "src/logging/counters.h" #include "src/logging/runtime-call-stats-scope.h" +#include "src/logging/runtime-call-stats.h" #include "src/objects/shared-function-info.h" #include "src/tracing/trace-event.h" #include "src/utils/ostreams.h" @@ -2119,25 +2123,43 @@ struct OptimizeTurboshaftPhase { } }; -struct TurboshaftTypeInferencePhase { - DECL_PIPELINE_PHASE_CONSTANTS(TurboshaftTypeInference) +struct TurboshaftTypedOptimizationsPhase { + DECL_PIPELINE_PHASE_CONSTANTS(TurboshaftTypedOptimizations) + + void Run(PipelineData* data, Zone* temp_zone) { + DCHECK(data->HasTurboshaftGraph()); + turboshaft::OptimizationPhase:: + Run(&data->turboshaft_graph(), temp_zone, data->node_origins(), + std::tuple{turboshaft::TypeInferenceReducerArgs{data->isolate()}}); + } +}; + +struct TurboshaftTypeAssertionsPhase { + 
DECL_PIPELINE_PHASE_CONSTANTS(TurboshaftTypeAssertions) void Run(PipelineData* data, Zone* temp_zone) { DCHECK(data->HasTurboshaftGraph()); UnparkedScopeIfNeeded scope(data->broker()); - if (v8_flags.turboshaft_assert_types) { - turboshaft::OptimizationPhase:: - Run(&data->turboshaft_graph(), temp_zone, data->node_origins(), - std::tuple{turboshaft::TypeInferenceReducerArgs{data->isolate()}, - turboshaft::AssertTypesReducerArgs{data->isolate()}}); - } else { - turboshaft::OptimizationPhase::Run( - &data->turboshaft_graph(), temp_zone, data->node_origins(), - std::tuple{turboshaft::TypeInferenceReducerArgs{data->isolate()}}); - } + turboshaft::OptimizationPhase:: + Run(&data->turboshaft_graph(), temp_zone, data->node_origins(), + std::tuple{turboshaft::TypeInferenceReducerArgs{data->isolate()}, + turboshaft::AssertTypesReducerArgs{data->isolate()}}); + } +}; + +struct TurboshaftDeadCodeEliminationPhase { + DECL_PIPELINE_PHASE_CONSTANTS(TurboshaftDeadCodeElimination) + + void Run(PipelineData* data, Zone* temp_zone) { + DCHECK(data->HasTurboshaftGraph()); + + turboshaft::OptimizationPhase::Run( + &data->turboshaft_graph(), temp_zone, data->node_origins(), + std::tuple{}); } }; @@ -2741,6 +2763,13 @@ struct PrintTurboshaftGraphPhase { } return false; }); + PrintTurboshaftCustomDataPerOperation( + data->info(), "Use Count (saturated)", data->turboshaft_graph(), + [](std::ostream& stream, const turboshaft::Graph& graph, + turboshaft::OpIndex index) -> bool { + stream << static_cast(graph.Get(index).saturated_use_count); + return true; + }); } if (data->info()->trace_turbo_graph()) { @@ -3119,8 +3148,19 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) { Run( DecompressionOptimizationPhase::phase_name()); - Run(); - Run(TurboshaftTypeInferencePhase::phase_name()); + Run(); + Run( + TurboshaftTypedOptimizationsPhase::phase_name()); + + if (v8_flags.turboshaft_assert_types) { + Run(); + Run( + TurboshaftTypeAssertionsPhase::phase_name()); + } + + Run(); + Run( + 
TurboshaftDeadCodeEliminationPhase::phase_name()); Run(linkage); TraceSchedule(data->info(), data, data->schedule(), diff --git a/src/compiler/turboshaft/assembler.h b/src/compiler/turboshaft/assembler.h index 5e6e22c619..7025ac0092 100644 --- a/src/compiler/turboshaft/assembler.h +++ b/src/compiler/turboshaft/assembler.h @@ -116,6 +116,15 @@ class ReducerBase : public ReducerBaseForwarder { void Analyze() {} + bool ShouldEliminateOperation(OpIndex index, const Operation& op) { + return op.saturated_use_count == 0; + } + + bool ShouldEliminateBranch(OpIndex index, const BranchOp& op, + BlockIndex& goto_block) { + return false; + } + // Get, GetPredecessorValue, Set and NewFreshVariable should be overwritten by // the VariableReducer. If the reducer stack has no VariableReducer, then // those methods should not be called. diff --git a/src/compiler/turboshaft/branch-elimination-reducer.h b/src/compiler/turboshaft/branch-elimination-reducer.h index 3057bf6e26..c013074212 100644 --- a/src/compiler/turboshaft/branch-elimination-reducer.h +++ b/src/compiler/turboshaft/branch-elimination-reducer.h @@ -326,8 +326,11 @@ class BranchEliminationReducer : public Next { // inline the destination block in place of the Goto. // We pass `false` to `direct_input` here, as we're looking one // block ahead of the current one. - Asm().CloneAndInlineBlock(old_dst, false); - return OpIndex::Invalid(); + // TODO(nicohartmann@): Temporarily disable this "optimization" because it + // prevents dead code elimination in some cases. Reevaluate this and + // reenable if phases have been reordered properly. 
+ // Asm().CloneAndInlineBlock(old_dst, false); + // return OpIndex::Invalid(); } goto no_change; diff --git a/src/compiler/turboshaft/dead-code-elimination-reducer.h b/src/compiler/turboshaft/dead-code-elimination-reducer.h new file mode 100644 index 0000000000..cc3cf3cdeb --- /dev/null +++ b/src/compiler/turboshaft/dead-code-elimination-reducer.h @@ -0,0 +1,461 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_TURBOSHAFT_DEAD_CODE_ELIMINATION_REDUCER_H_ +#define V8_COMPILER_TURBOSHAFT_DEAD_CODE_ELIMINATION_REDUCER_H_ + +#include + +#include "src/common/globals.h" +#include "src/compiler/backend/instruction-codes.h" +#include "src/compiler/turboshaft/assembler.h" +#include "src/compiler/turboshaft/graph.h" +#include "src/compiler/turboshaft/index.h" +#include "src/compiler/turboshaft/operations.h" + +namespace v8::internal::compiler::turboshaft { + +// General overview +// +// DeadCodeAnalysis iterates the graph backwards to propagate liveness +// information. This information consists of the ControlState and the +// OperationState. +// +// OperationState reflects the liveness of operations. An operation is live if +// +// 1) The operation has the `is_required_when_unused` property +// 2) Any of its outputs is live (is used in a live operation). +// +// We introduce the concept of `weak live` which only differs from (strong) +// liveness on how it impacts the ControlState, but is otherwise identical. On +// operation is weak live if +// +// Any of its outputs is weak live (is used in a weak live operation) and the +// operation is not (strong) live. +// +// If the operation is neither strong nor weak live, the operation is dead and +// can be eliminated. +// +// ControlState describes to which block we could jump immediately without +// changing the program semantics. 
That is missing any side effects, required +// control flow or any strong(!) live operations. This information is then used +// at BranchOps to rewrite them to a GotoOp towards the corresponding block. +// Weak live operations thus are not eliminated but allow control flow to be +// rewritten around them. By marking stack checks (and all operations that they +// depend on) as weak live, this allows otherwise empty loops to be eliminated. +// From the output control state(s) c after an operation, the control state c' +// before the operation is computed as follows: +// +// | Bi if ct, cf are Bi or Unreachable +// c' = [Branch](ct, cf) = { +// | NotEliminatable otherwise +// +// And if c' = Bi, then the BranchOp can be rewritten into GotoOp(Bi). +// +// | NotEliminatable if Op is strong live +// c' = [Op](c) = { +// | c otherwise +// +// | Bk if c = Bk +// c' = [Merge i](c) = { Bi if Merge i has no live phis +// | NotEliminatable otherwise +// +// Where Merge is an imaginary operation at the start of every merge block. This +// is the important part for the analysis. If block `Merge i` does not have any +// strong live phi operations, then we don't necessarily need to distinguish the +// control flow paths going into that block and if we further don't encounter +// any (strong) live operations along any of the paths leading to `Merge i` +// starting at some BranchOp, we can skip both branches and eliminate the +// control flow entirely by rewriting the BranchOp into a GotoOp(Bi). Notice +// that if the control state already describes a potential Goto-target Bk, then +// we do not replace that in order to track the farthest block we can jump to. + +struct ControlState { + // Lattice: + // + // NotEliminatable + // / | \ + // B1 ... Bn + // \ | / + // Unreachable + // + // We use ControlState to propagate information during the analysis about how + // branches can be rewritten. 
Read the values like this: + // - NotEliminatable: We cannot rewrite a branch, because we need the control + // flow (e.g. because we have seen live operations on either branch or need + // the phi at the merge). + // - Bj: Control can be rewritten to go directly to Block Bj, because all + // paths to that block are free of live operations. + // - Unreachable: This is the bottom element and it represents that we haven't + // seen anything live yet and are free to rewrite branches to any block + // reachable from the current block. + enum Kind { + kUnreachable, + kBlock, + kNotEliminatable, + }; + + static ControlState NotEliminatable() { + return ControlState{kNotEliminatable}; + } + static ControlState Block(BlockIndex block) { + return ControlState{kBlock, block}; + } + static ControlState Unreachable() { return ControlState{kUnreachable}; } + + explicit ControlState(Kind kind, BlockIndex block = BlockIndex::Invalid()) + : kind(kind), block(block) {} + + static ControlState LeastUpperBound(const ControlState& lhs, + const ControlState& rhs) { + switch (lhs.kind) { + case Kind::kUnreachable: + return rhs; + case Kind::kBlock: { + if (rhs.kind == Kind::kUnreachable) return lhs; + if (rhs.kind == Kind::kNotEliminatable) return rhs; + if (lhs.block == rhs.block) return lhs; + return NotEliminatable(); + } + case Kind::kNotEliminatable: + return lhs; + } + } + + Kind kind; + BlockIndex block; +}; + +inline std::ostream& operator<<(std::ostream& stream, + const ControlState& state) { + switch (state.kind) { + case ControlState::kNotEliminatable: + return stream << "NotEliminatable"; + case ControlState::kBlock: + return stream << "Block(" << state.block << ")"; + case ControlState::kUnreachable: + return stream << "Unreachable"; + } +} + +inline bool operator==(const ControlState& lhs, const ControlState& rhs) { + if (lhs.kind != rhs.kind) return false; + if (lhs.kind == ControlState::kBlock) { + DCHECK_EQ(rhs.kind, ControlState::kBlock); + return lhs.block == 
rhs.block; + } + return true; +} + +inline bool operator!=(const ControlState& lhs, const ControlState& rhs) { + return !(lhs == rhs); +} + +struct OperationState { + // Lattice: + // + // Live + // | + // WeakLive + // | + // Dead + // + // Describes the liveness state of an operation. We use the notion of weak + // liveness to express that an operation needs to be kept if we cannot + // eliminate (jump over) the entire basic block. In other words: A weak live + // operation will not be eliminated, but it doesn't prevent the propagation of + // the control state to allow to jump over the block if it contains no + // (strong) live operations. This will be useful to eliminate loops that are + // kept alive only by the contained stack checks. + enum Liveness : uint8_t { + kDead, + kWeakLive, + kLive, + }; + + static Liveness LeastUpperBound(Liveness lhs, Liveness rhs) { + static_assert(kLive > kWeakLive && kWeakLive > kDead); + return std::max(lhs, rhs); + } +}; + +inline std::ostream& operator<<(std::ostream& stream, + OperationState::Liveness liveness) { + switch (liveness) { + case OperationState::kDead: + return stream << "Dead"; + case OperationState::kWeakLive: + return stream << "WeakLive"; + case OperationState::kLive: + return stream << "Live"; + } + UNREACHABLE(); +} + +class DeadCodeAnalysis { + public: + explicit DeadCodeAnalysis(Graph& graph, Zone* phase_zone) + : graph_(graph), + liveness_(graph.op_id_count(), OperationState::kDead, phase_zone), + entry_control_state_(graph.block_count(), ControlState::Unreachable(), + phase_zone), + rewritable_branch_targets_(phase_zone) {} + + template + std::pair, + ZoneMap> + Run() { + if constexpr (trace_analysis) { + std::cout << "===== Running Dead Code Analysis =====\n"; + } + for (uint32_t unprocessed_count = graph_.block_count(); + unprocessed_count > 0;) { + BlockIndex block_index = static_cast(unprocessed_count - 1); + --unprocessed_count; + + const Block& block = graph_.Get(block_index); + 
ProcessBlock(block, &unprocessed_count); + } + + if constexpr (trace_analysis) { + std::cout << "===== Results =====\n== Operation State ==\n"; + for (Block b : graph_.blocks()) { + std::cout << PrintAsBlockHeader{b} << ":\n"; + for (OpIndex index : graph_.OperationIndices(b)) { + std::cout << " " << std::setw(8) << liveness_[index] << " " + << std::setw(3) << index.id() << ": " << graph_.Get(index) + << "\n"; + } + } + + std::cout << "== Rewritable Branches ==\n"; + for (auto [branch_id, target] : rewritable_branch_targets_) { + DCHECK(target.valid()); + std::cout << " " << std::setw(3) << branch_id << ": Branch ==> Goto " + << target.id() << "\n"; + } + std::cout << "==========\n"; + } + + return {std::move(liveness_), std::move(rewritable_branch_targets_)}; + } + + template + void ProcessBlock(const Block& block, uint32_t* unprocessed_count) { + if constexpr (trace_analysis) { + std::cout << "\n==========\n=== Processing " << PrintAsBlockHeader{block} + << ":\n==========\nEXIT CONTROL STATE\n"; + } + auto successors = SuccessorBlocks(block.LastOperation(graph_)); + ControlState control_state = ControlState::Unreachable(); + for (size_t i = 0; i < successors.size(); ++i) { + const auto& r = entry_control_state_[successors[i]->index()]; + if constexpr (trace_analysis) { + std::cout << " Successor " << successors[i]->index() << ": " << r + << "\n"; + } + control_state = ControlState::LeastUpperBound(control_state, r); + } + if constexpr (trace_analysis) + std::cout << "Combined: " << control_state << "\n"; + + // If control_state == ControlState::Block(b), then the merge block b is + // reachable through every path starting at the current block without any + // live operations. 
+
+    if constexpr (trace_analysis) std::cout << "OPERATION STATE\n";
+    auto op_range = graph_.OperationIndices(block);
+    bool has_live_phis = false;
+    for (auto it = op_range.end(); it != op_range.begin();) {
+      --it;
+      OpIndex index = *it;
+      const Operation& op = graph_.Get(index);
+      if constexpr (trace_analysis) std::cout << index << ":" << op << "\n";
+      OperationState::Liveness op_state = liveness_[index];
+
+      if (op.Is<BranchOp>()) {
+        if (control_state != ControlState::NotEliminatable()) {
+          // Branch is still dead.
+          op_state = OperationState::kWeakLive;
+          // If we know a target block we can rewrite into a goto.
+          if (control_state.kind == ControlState::kBlock) {
+            BlockIndex target = control_state.block;
+            DCHECK(target.valid());
+            rewritable_branch_targets_[index.id()] = target;
+          }
+        } else {
+          // Branch is live. We cannot rewrite it.
+          op_state = OperationState::kLive;
+          auto it = rewritable_branch_targets_.find(index.id());
+          if (it != rewritable_branch_targets_.end()) {
+            rewritable_branch_targets_.erase(it);
+          }
+        }
+      } else if (op.saturated_use_count == 0) {
+        // Operation is already recognized as dead by a previous analysis.
+        DCHECK_EQ(op_state, OperationState::kDead);
+      } else if (op.Is<GotoOp>()) {
+        // Gotos are WeakLive.
+        op_state = OperationState::kWeakLive;
+      } else if (op.Properties().is_required_when_unused) {
+        op_state = OperationState::kLive;
+      } else if (op.Is<PhiOp>()) {
+        has_live_phis = has_live_phis || (op_state == OperationState::kLive);
+
+        if (block.IsLoop()) {
+          const PhiOp& phi = op.Cast<PhiOp>();
+          // Check if the operation state of the input coming from the backedge
+          // changes the liveness of the phi. In that case, trigger a revisit of
+          // the loop.
+          if (liveness_[phi.inputs()[PhiOp::kLoopPhiBackEdgeIndex]] <
+              op_state) {
+            if constexpr (trace_analysis) {
+              std::cout
+                  << "Operation state has changed. Need to revisit loop.\n";
+            }
+            Block* backedge = block.LastPredecessor();
+            // Revisit the loop by increasing the {unprocessed_count} to include
+            // all blocks of the loop.
+            *unprocessed_count =
+                std::max(*unprocessed_count, backedge->index().id() + 1);
+          }
+        }
+      }
+
+      // TODO(nicohartmann@): Handle Stack Guards to allow elimination of
+      // otherwise empty loops.
+      //
+      // if(const CallOp* call = op.TryCast<CallOp>()) {
+      //   if(std::string(call->descriptor->descriptor->debug_name())
+      //     == "StackGuard") {
+      //     DCHECK_EQ(op_state, OperationState::kLive);
+      //     op_state = OperationState::kWeakLive;
+      //   }
+      // }
+
+      DCHECK_LE(liveness_[index], op_state);
+      // If everything is still dead. We don't need to update anything.
+      if (op_state == OperationState::kDead) continue;
+
+      // We have a (possibly weak) live operation.
+      if constexpr (trace_analysis) {
+        std::cout << " " << op_state << " <== " << liveness_[index] << "\n";
+      }
+      liveness_[index] = op_state;
+
+      if constexpr (trace_analysis) {
+        if (op.input_count > 0) std::cout << " Updating inputs:\n";
+      }
+      for (OpIndex input : op.inputs()) {
+        auto old_input_state = liveness_[input];
+        auto new_input_state =
+            OperationState::LeastUpperBound(old_input_state, op_state);
+        if constexpr (trace_analysis) {
+          std::cout << " " << input << ": " << new_input_state
+                    << " <== " << old_input_state << " || " << op_state << "\n";
+        }
+        liveness_[input] = new_input_state;
+      }
+
+      if (op_state == OperationState::kLive &&
+          control_state != ControlState::NotEliminatable()) {
+        // This block has live operations, which means that we can't skip it.
+        // Reset the ControlState to NotEliminatable.
+        if constexpr (trace_analysis) {
+          std::cout << "Block has live operations. New control state: "
+                    << ControlState::NotEliminatable() << "\n";
+        }
+        control_state = ControlState::NotEliminatable();
+      }
+    }
+
+    if constexpr (trace_analysis) {
+      std::cout << "ENTRY CONTROL STATE\nAfter operations: " << control_state
+                << "\n";
+    }
+
+    // If this block is a merge and we don't have any live phis, it is a
+    // potential target for branch redirection.
+    if (block.IsLoopOrMerge()) {
+      if (!has_live_phis) {
+        if (control_state.kind != ControlState::kBlock) {
+          control_state = ControlState::Block(block.index());
+          if constexpr (trace_analysis) {
+            std::cout
+                << "Block is loop or merge and has no live phi operations.\n";
+          }
+        } else if constexpr (trace_analysis) {
+          std::cout << "Block is loop or merge and has no live phi "
+                       "operations.\nControl state already has a goto block: "
+                    << control_state << "\n";
+        }
+      }
+      if (block.IsLoop() &&
+          entry_control_state_[block.index()] != control_state) {
+        if constexpr (trace_analysis) {
+          std::cout << "Control state has changed. Need to revisit loop.\n";
+        }
+        Block* backedge = block.LastPredecessor();
+        DCHECK_NOT_NULL(backedge);
+        // Revisit the loop by increasing the {unprocessed_count} to include
+        // all blocks of the loop.
+        *unprocessed_count =
+            std::max(*unprocessed_count, backedge->index().id() + 1);
+      }
+    }
+
+    if constexpr (trace_analysis) {
+      std::cout << "Final: " << control_state << "\n";
+    }
+    entry_control_state_[block.index()] = control_state;
+  }
+
+ private:
+  Graph& graph_;
+  FixedSidetable<OperationState::Liveness> liveness_;
+  FixedBlockSidetable<ControlState> entry_control_state_;
+  ZoneMap<uint32_t, BlockIndex> rewritable_branch_targets_;
+};
+
+template <class Next>
+class DeadCodeEliminationReducer : public Next {
+ public:
+  using Next::Asm;
+
+  template <class... Args>
+  explicit DeadCodeEliminationReducer(const std::tuple<Args...>& args)
+      : Next(args),
+        branch_rewrite_targets_(Asm().phase_zone()),
+        analyzer_(Asm().modifiable_input_graph(), Asm().phase_zone()) {}
+
+  void Analyze() {
+    // TODO(nicohartmann@): We might want to make this a flag.
+    constexpr bool trace_analysis = false;
+    std::tie(liveness_, branch_rewrite_targets_) =
+        analyzer_.Run<trace_analysis>();
+    Next::Analyze();
+  }
+
+  bool ShouldEliminateOperation(OpIndex index, const Operation& op) {
+    DCHECK(!op.Is<BranchOp>());
+    return (*liveness_)[index] == OperationState::kDead;
+  }
+
+  bool ShouldEliminateBranch(OpIndex index, const BranchOp& op,
+                             BlockIndex& goto_target) {
+    auto it = branch_rewrite_targets_.find(index.id());
+    if (it == branch_rewrite_targets_.end()) return false;
+    goto_target = it->second;
+    return true;
+  }
+
+ private:
+  base::Optional<FixedSidetable<OperationState::Liveness>> liveness_;
+  ZoneMap<uint32_t, BlockIndex> branch_rewrite_targets_;
+  DeadCodeAnalysis analyzer_;
+};
+
+}  // namespace v8::internal::compiler::turboshaft
+
+#endif  // V8_COMPILER_TURBOSHAFT_DEAD_CODE_ELIMINATION_REDUCER_H_
diff --git a/src/compiler/turboshaft/optimization-phase.h b/src/compiler/turboshaft/optimization-phase.h
index 535b803501..6bb6579c57 100644
--- a/src/compiler/turboshaft/optimization-phase.h
+++ b/src/compiler/turboshaft/optimization-phase.h
@@ -17,6 +17,7 @@
 #include "src/base/vector.h"
 #include "src/compiler/node-origin-table.h"
 #include "src/compiler/turboshaft/graph.h"
+#include "src/compiler/turboshaft/index.h"
 #include "src/compiler/turboshaft/operations.h"
 #include "src/compiler/turboshaft/snapshot-table.h"
@@ -56,66 +57,6 @@
 V8_INLINE bool ShouldSkipOperation(const Operation& op) {
   return op.saturated_use_count == 0;
 }
-// TODO(dmercadier, tebbi): transform this analyzer into a reducer, and plug in
-// into some reducer stacks.
-struct LivenessAnalyzer : AnalyzerBase {
-  using Base = AnalyzerBase;
-  // Using `uint8_t` instead of `bool` prevents `std::vector` from using a
-  // bitvector, which has worse performance.
-  std::vector<uint8_t> op_used;
-
-  LivenessAnalyzer(const Graph& graph, Zone* phase_zone)
-      : AnalyzerBase(graph, phase_zone), op_used(graph.op_id_count(), false) {}
-
-  bool OpIsUsed(OpIndex i) { return op_used[i.id()]; }
-
-  void Run() {
-    for (uint32_t unprocessed_count = graph.block_count();
-         unprocessed_count > 0;) {
-      BlockIndex block_index = static_cast<BlockIndex>(unprocessed_count - 1);
-      --unprocessed_count;
-      const Block& block = graph.Get(block_index);
-      if (V8_UNLIKELY(block.IsLoop())) {
-        ProcessBlock<true>(block, &unprocessed_count);
-      } else {
-        ProcessBlock<false>(block, &unprocessed_count);
-      }
-    }
-  }
-
-  template <bool is_loop>
-  void ProcessBlock(const Block& block, uint32_t* unprocessed_count) {
-    auto op_range = graph.OperationIndices(block);
-    for (auto it = op_range.end(); it != op_range.begin();) {
-      --it;
-      OpIndex index = *it;
-      const Operation& op = graph.Get(index);
-      if (op.Properties().is_required_when_unused) {
-        op_used[index.id()] = true;
-      } else if (!OpIsUsed(index)) {
-        continue;
-      }
-      if constexpr (is_loop) {
-        if (op.Is<PhiOp>()) {
-          const PhiOp& phi = op.Cast<PhiOp>();
-          // Mark the loop backedge as used. Trigger a revisit if it wasn't
-          // marked as used already.
-          if (!OpIsUsed(phi.inputs()[PhiOp::kLoopPhiBackEdgeIndex])) {
-            Block* backedge = block.LastPredecessor();
-            // Revisit the loop by increasing the `unprocessed_count` to include
-            // all blocks of the loop.
-            *unprocessed_count =
-                std::max(*unprocessed_count, backedge->index().id() + 1);
-          }
-        }
-      }
-      for (OpIndex input : op.inputs()) {
-        op_used[input.id()] = true;
-      }
-    }
-  }
-};
-
 template