Remove dynamic map checks and custom deoptimization kinds

This CL removes:

- Dynamic map checks aka minimorphic property loads (TF support,
  builtins).
- "Bailout" deopts (= drop to the interpreter once, but don't
  throw out optimized code).
- "EagerWithResume" deopts (= part of dynamic map check
  functionality, we call a builtin for the deopt check and deopt
  or resume based on the result).
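
A minimal sketch of the DeoptimizeKind set after this change, based on the
src/common/globals.h hunk below (the commented-out members are the ones
being deleted):

  #include <cstdint>

  enum class DeoptimizeKind : uint8_t {
    kEager,
    kSoft,
    // kBailout,          // removed by this CL
    kLazy,
    // kEagerWithResume,  // removed by this CL
  };
  constexpr DeoptimizeKind kFirstDeoptimizeKind = DeoptimizeKind::kEager;
  constexpr DeoptimizeKind kLastDeoptimizeKind = DeoptimizeKind::kLazy;
  constexpr int kDeoptimizeKindCount =
      static_cast<int>(kLastDeoptimizeKind) + 1;  // now 3, was 5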

Fixed: v8:12552
Change-Id: I492cf1667e0f54586690b2f72a65ea804224b840
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3401585
Auto-Submit: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Sathya Gunasekaran <gsathya@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79544}
Jakob Gruber 2022-03-21 11:56:46 +01:00 committed by V8 LUCI CQ
parent 54c3344edc
commit b2978927d8
61 changed files with 169 additions and 1463 deletions

View File

@ -800,7 +800,6 @@ filegroup(
"src/builtins/function.tq",
"src/builtins/growable-fixed-array.tq",
"src/builtins/ic-callable.tq",
"src/builtins/ic-dynamic-check-maps.tq",
"src/builtins/ic.tq",
"src/builtins/internal-coverage.tq",
"src/builtins/internal.tq",

View File

@ -1670,7 +1670,6 @@ torque_files = [
"src/builtins/function.tq",
"src/builtins/growable-fixed-array.tq",
"src/builtins/ic-callable.tq",
"src/builtins/ic-dynamic-check-maps.tq",
"src/builtins/ic.tq",
"src/builtins/internal-coverage.tq",
"src/builtins/internal.tq",

View File

@ -365,8 +365,8 @@ class Internals {
static const uint32_t kNumIsolateDataSlots = 4;
static const int kStackGuardSize = 7 * kApiSystemPointerSize;
static const int kBuiltinTier0EntryTableSize = 13 * kApiSystemPointerSize;
static const int kBuiltinTier0TableSize = 13 * kApiSystemPointerSize;
static const int kBuiltinTier0EntryTableSize = 10 * kApiSystemPointerSize;
static const int kBuiltinTier0TableSize = 10 * kApiSystemPointerSize;
// IsolateData layout guarantees.
static const int kIsolateCageBaseOffset = 0;

View File

@ -3548,10 +3548,6 @@ void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
}
void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
}
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@ -3731,74 +3727,6 @@ void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
Generate_BaselineOrInterpreterEntry(masm, false, true);
}
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
Generate_DynamicCheckMapsTrampoline<DynamicCheckMapsDescriptor>(
masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMaps));
}
void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
MacroAssembler* masm) {
Generate_DynamicCheckMapsTrampoline<
DynamicCheckMapsWithFeedbackVectorDescriptor>(
masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMapsWithFeedbackVector));
}
template <class Descriptor>
void Builtins::Generate_DynamicCheckMapsTrampoline(
MacroAssembler* masm, Handle<Code> builtin_target) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
// Only save the registers that the DynamicCheckMaps builtin can clobber.
Descriptor descriptor;
RegList registers = descriptor.allocatable_registers();
// If FLAG_debug_code is enabled, CSA checks will call a C function, so we need
// to save all CallerSaved registers too.
if (FLAG_debug_code) registers |= kCallerSaved;
__ MaybeSaveRegisters(registers);
// Load the immediate arguments from the deopt exit to pass to the builtin.
Register slot_arg = descriptor.GetRegisterParameter(Descriptor::kSlot);
Register handler_arg = descriptor.GetRegisterParameter(Descriptor::kHandler);
__ ldr(handler_arg, MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
__ ldr(slot_arg, MemOperand(handler_arg,
Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
__ ldr(
handler_arg,
MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
__ Call(builtin_target, RelocInfo::CODE_TARGET);
Label deopt, bailout;
__ cmp_raw_immediate(r0, static_cast<int>(DynamicCheckMapsStatus::kSuccess));
__ b(ne, &deopt);
__ MaybeRestoreRegisters(registers);
__ LeaveFrame(StackFrame::INTERNAL);
__ Ret();
__ bind(&deopt);
__ cmp_raw_immediate(r0, static_cast<int>(DynamicCheckMapsStatus::kBailout));
__ b(eq, &bailout);
if (FLAG_debug_code) {
__ cmp_raw_immediate(r0, static_cast<int>(DynamicCheckMapsStatus::kDeopt));
__ Assert(eq, AbortReason::kUnexpectedDynamicCheckMapsStatus);
}
__ MaybeRestoreRegisters(registers);
__ LeaveFrame(StackFrame::INTERNAL);
Handle<Code> deopt_eager = masm->isolate()->builtins()->code_handle(
Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
__ Jump(deopt_eager, RelocInfo::CODE_TARGET);
__ bind(&bailout);
__ MaybeRestoreRegisters(registers);
__ LeaveFrame(StackFrame::INTERNAL);
Handle<Code> deopt_bailout = masm->isolate()->builtins()->code_handle(
Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
__ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
}
#undef __
} // namespace internal

View File

@ -4067,10 +4067,6 @@ void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
}
void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
}
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@ -4251,90 +4247,6 @@ void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
Generate_BaselineOrInterpreterEntry(masm, false, true);
}
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
Generate_DynamicCheckMapsTrampoline<DynamicCheckMapsDescriptor>(
masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMaps));
}
void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
MacroAssembler* masm) {
Generate_DynamicCheckMapsTrampoline<
DynamicCheckMapsWithFeedbackVectorDescriptor>(
masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMapsWithFeedbackVector));
}
template <class Descriptor>
void Builtins::Generate_DynamicCheckMapsTrampoline(
MacroAssembler* masm, Handle<CodeT> builtin_target) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
// Only save the registers that the DynamicCheckMaps builtin can clobber.
Descriptor descriptor;
RegList registers = descriptor.allocatable_registers();
// If FLAG_debug_code is enabled, CSA checks will call a C function, so we need
// to save all CallerSaved registers too.
if (FLAG_debug_code) {
registers |= RegList::FromBits(static_cast<uint32_t>(kCallerSaved.bits()));
}
__ MaybeSaveRegisters(registers);
// Load the immediate arguments from the deopt exit to pass to the builtin.
Register slot_arg = descriptor.GetRegisterParameter(Descriptor::kSlot);
Register handler_arg = descriptor.GetRegisterParameter(Descriptor::kHandler);
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
// Make sure we can use x16 and x17, and add slot_arg as a temp reg if needed.
UseScratchRegisterScope temps(masm);
temps.Exclude(x16, x17);
temps.Include(slot_arg);
// Load return address into x17 and decode into handler_arg.
__ Add(x16, fp, CommonFrameConstants::kCallerSPOffset);
__ Ldr(x17, MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
__ Autib1716();
__ Mov(handler_arg, x17);
#else
__ Ldr(handler_arg, MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
#endif
__ Ldr(slot_arg, MemOperand(handler_arg,
Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
__ Ldr(
handler_arg,
MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
__ Call(builtin_target, RelocInfo::CODE_TARGET);
Label deopt, bailout;
__ CompareAndBranch(
x0, static_cast<int32_t>(DynamicCheckMapsStatus::kSuccess), ne, &deopt);
__ MaybeRestoreRegisters(registers);
__ LeaveFrame(StackFrame::INTERNAL);
__ Ret();
__ Bind(&deopt);
__ CompareAndBranch(
x0, static_cast<int32_t>(DynamicCheckMapsStatus::kBailout), eq, &bailout);
if (FLAG_debug_code) {
__ Cmp(x0, Operand(static_cast<int>(DynamicCheckMapsStatus::kDeopt)));
__ Assert(eq, AbortReason::kUnexpectedDynamicCheckMapsStatus);
}
__ MaybeRestoreRegisters(registers);
__ LeaveFrame(StackFrame::INTERNAL);
Handle<CodeT> deopt_eager = masm->isolate()->builtins()->code_handle(
Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
__ Jump(deopt_eager, RelocInfo::CODE_TARGET);
__ Bind(&bailout);
__ MaybeRestoreRegisters(registers);
__ LeaveFrame(StackFrame::INTERNAL);
Handle<CodeT> deopt_bailout = masm->isolate()->builtins()->code_handle(
Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
__ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
}
#undef __
} // namespace internal

View File

@ -46,11 +46,7 @@ namespace internal {
/* Deoptimization entries. */ \
ASM(DeoptimizationEntry_Eager, DeoptimizationEntry) \
ASM(DeoptimizationEntry_Soft, DeoptimizationEntry) \
ASM(DeoptimizationEntry_Bailout, DeoptimizationEntry) \
ASM(DeoptimizationEntry_Lazy, DeoptimizationEntry) \
ASM(DynamicCheckMapsTrampoline, DynamicCheckMaps) \
ASM(DynamicCheckMapsWithFeedbackVectorTrampoline, \
DynamicCheckMapsWithFeedbackVector) \
\
/* GC write barrier. */ \
TFC(RecordWriteEmitRememberedSetSaveFP, WriteBarrier) \
@ -302,10 +298,6 @@ namespace internal {
TFH(KeyedHasIC_SloppyArguments, LoadWithVector) \
TFH(HasIndexedInterceptorIC, LoadWithVector) \
\
/* Dynamic check maps */ \
TFC(DynamicCheckMaps, DynamicCheckMaps) \
TFC(DynamicCheckMapsWithFeedbackVector, DynamicCheckMapsWithFeedbackVector) \
\
/* Microtask helpers */ \
TFS(EnqueueMicrotask, kMicrotask) \
ASM(RunMicrotasksTrampoline, RunMicrotasksEntry) \

View File

@ -254,23 +254,5 @@ void Builtins::Generate_LookupContextInsideTypeofBaseline(
assembler.GenerateLookupContextBaseline(TypeofMode::kInside);
}
TF_BUILTIN(DynamicCheckMaps, CodeStubAssembler) {
auto map = Parameter<Map>(Descriptor::kMap);
auto slot = UncheckedParameter<IntPtrT>(Descriptor::kSlot);
auto handler = Parameter<Object>(Descriptor::kHandler);
TNode<Int32T> status = DynamicCheckMaps(map, slot, handler);
Return(status);
}
TF_BUILTIN(DynamicCheckMapsWithFeedbackVector, CodeStubAssembler) {
auto map = Parameter<Map>(Descriptor::kMap);
auto slot = UncheckedParameter<IntPtrT>(Descriptor::kSlot);
auto handler = Parameter<Object>(Descriptor::kHandler);
auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
TNode<Int32T> status =
DynamicCheckMapsWithFeedbackVector(map, slot, handler, feedback_vector);
Return(status);
}
} // namespace internal
} // namespace v8

View File

@ -303,10 +303,6 @@ class Builtins {
static void Generate_InterpreterPushArgsThenConstructImpl(
MacroAssembler* masm, InterpreterPushArgsMode mode);
template <class Descriptor>
static void Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm,
Handle<CodeT> builtin_target);
#define DECLARE_ASM(Name, ...) \
static void Generate_##Name(MacroAssembler* masm);
#define DECLARE_TF(Name, ...) \

View File

@ -4162,10 +4162,6 @@ void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
}
void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
}
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@ -4355,73 +4351,6 @@ void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
Generate_BaselineOrInterpreterEntry(masm, false, true);
}
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
Generate_DynamicCheckMapsTrampoline<DynamicCheckMapsDescriptor>(
masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMaps));
}
void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
MacroAssembler* masm) {
Generate_DynamicCheckMapsTrampoline<
DynamicCheckMapsWithFeedbackVectorDescriptor>(
masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMapsWithFeedbackVector));
}
template <class Descriptor>
void Builtins::Generate_DynamicCheckMapsTrampoline(
MacroAssembler* masm, Handle<Code> builtin_target) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
// Only save the registers that the DynamicCheckMaps builtin can clobber.
Descriptor descriptor;
RegList registers = descriptor.allocatable_registers();
// If FLAG_debug_code is enabled, CSA checks will call a C function, so we need
// to save all CallerSaved registers too.
if (FLAG_debug_code) registers |= kJSCallerSaved;
__ MaybeSaveRegisters(registers);
// Load the immediate arguments from the deopt exit to pass to the builtin.
Register slot_arg = descriptor.GetRegisterParameter(Descriptor::kSlot);
Register handler_arg = descriptor.GetRegisterParameter(Descriptor::kHandler);
__ mov(handler_arg, Operand(ebp, CommonFrameConstants::kCallerPCOffset));
__ mov(slot_arg,
Operand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
__ mov(handler_arg,
Operand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
__ Call(builtin_target, RelocInfo::CODE_TARGET);
Label deopt, bailout;
__ cmp(eax, Immediate(static_cast<int>(DynamicCheckMapsStatus::kSuccess)));
__ j(not_equal, &deopt);
__ MaybeRestoreRegisters(registers);
__ LeaveFrame(StackFrame::INTERNAL);
__ Ret();
__ bind(&deopt);
__ cmp(eax, Immediate(static_cast<int>(DynamicCheckMapsStatus::kBailout)));
__ j(equal, &bailout);
if (FLAG_debug_code) {
__ cmp(eax, Immediate(static_cast<int>(DynamicCheckMapsStatus::kDeopt)));
__ Assert(equal, AbortReason::kUnexpectedDynamicCheckMapsStatus);
}
__ MaybeRestoreRegisters(registers);
__ LeaveFrame(StackFrame::INTERNAL);
Handle<Code> deopt_eager = masm->isolate()->builtins()->code_handle(
Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
__ Jump(deopt_eager, RelocInfo::CODE_TARGET);
__ bind(&bailout);
__ MaybeRestoreRegisters(registers);
__ LeaveFrame(StackFrame::INTERNAL);
Handle<Code> deopt_bailout = masm->isolate()->builtins()->code_handle(
Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
__ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
}
#undef __
} // namespace internal

View File

@ -1,110 +0,0 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
namespace ic {
const kSuccess: constexpr int32
generates 'static_cast<int>(DynamicCheckMapsStatus::kSuccess)';
const kBailout: constexpr int32
generates 'static_cast<int>(DynamicCheckMapsStatus::kBailout)';
const kDeopt: constexpr int32
generates 'static_cast<int>(DynamicCheckMapsStatus::kDeopt)';
extern macro LoadFeedbackVectorForStubWithTrampoline(): FeedbackVector;
macro PerformPolymorphicCheck(
expectedPolymorphicArray: HeapObject, actualMap: Map,
actualHandler: Smi|DataHandler): int32 {
if (!Is<WeakFixedArray>(expectedPolymorphicArray)) {
return kDeopt;
}
const polymorphicArray = UnsafeCast<WeakFixedArray>(expectedPolymorphicArray);
const weakActualMap = MakeWeak(actualMap);
const length = polymorphicArray.length_intptr;
dcheck(length > 0);
for (let mapIndex: intptr = 0; mapIndex < length;
mapIndex += FeedbackIteratorEntrySize()) {
const maybeCachedMap =
UnsafeCast<WeakHeapObject>(polymorphicArray[mapIndex]);
if (maybeCachedMap == weakActualMap) {
const handlerIndex = mapIndex + FeedbackIteratorHandlerOffset();
dcheck(handlerIndex < length);
const maybeHandler =
Cast<Object>(polymorphicArray[handlerIndex]) otherwise unreachable;
if (TaggedEqual(maybeHandler, actualHandler)) {
return kSuccess;
} else {
return kDeopt;
}
}
}
return kBailout;
}
macro PerformMonomorphicCheck(
feedbackVector: FeedbackVector, slotIndex: intptr, expectedMap: HeapObject,
actualMap: Map, actualHandler: Smi|DataHandler): int32 {
if (TaggedEqual(expectedMap, actualMap)) {
const handlerIndex = slotIndex + 1;
dcheck(handlerIndex < feedbackVector.length_intptr);
const maybeHandler =
Cast<Object>(feedbackVector[handlerIndex]) otherwise unreachable;
if (TaggedEqual(actualHandler, maybeHandler)) {
return kSuccess;
}
return kDeopt;
}
return kBailout;
}
// This builtin performs map checks by dynamically looking at the
// feedback in the feedback vector.
//
// There are two major cases handled by this builtin:
// (a) Monomorphic check
// (b) Polymorphic check
//
// For the monomorphic check, the incoming map is migrated and checked
// against the map and handler in the feedback vector.
//
// For the polymorphic check, the feedback vector is iterated over and
// each of the maps & handlers is compared against the incoming map and
// handler.
//
// If any of the map and associated handler checks pass, we return kSuccess
// status. If we have never seen the map before, we return kBailout status to
// bail out to the interpreter and update the feedback. If we have seen the
// map, but the associated handler check fails, we return kDeopt status.
@export
macro DynamicCheckMaps(
actualMap: Map, slotIndex: intptr, actualHandler: Smi|DataHandler): int32 {
const feedbackVector = LoadFeedbackVectorForStubWithTrampoline();
return DynamicCheckMapsWithFeedbackVector(
actualMap, slotIndex, actualHandler, feedbackVector);
}
@export
macro DynamicCheckMapsWithFeedbackVector(
actualMap: Map, slotIndex: intptr, actualHandler: Smi|DataHandler,
feedbackVector: FeedbackVector): int32 {
const feedback = feedbackVector[slotIndex];
try {
const maybePolymorphicArray =
GetHeapObjectIfStrong(feedback) otherwise MigrateAndDoMonomorphicCheck;
return PerformPolymorphicCheck(
maybePolymorphicArray, actualMap, actualHandler);
} label MigrateAndDoMonomorphicCheck {
const expectedMap = GetHeapObjectAssumeWeak(feedback) otherwise Deopt;
return PerformMonomorphicCheck(
feedbackVector, slotIndex, expectedMap, actualMap, actualHandler);
} label Deopt {
return kDeopt;
}
}
} // namespace ic
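
A minimal standalone sketch of the status protocol described above, i.e. how
the removed trampolines dispatched on the builtin's return value (the Action
enum and DispatchOnStatus are hypothetical stand-ins for the trampoline's
tail calls):

  #include <cstdint>

  enum class DynamicCheckMapsStatus : uint8_t { kSuccess = 0, kBailout = 1, kDeopt = 2 };

  // Hypothetical summary of the removed Generate_DynamicCheckMapsTrampoline logic.
  enum class Action { kResumeOptimizedCode, kBailoutDeopt, kEagerDeopt };

  Action DispatchOnStatus(DynamicCheckMapsStatus status) {
    switch (status) {
      case DynamicCheckMapsStatus::kSuccess:
        return Action::kResumeOptimizedCode;  // map+handler matched: keep running optimized code
      case DynamicCheckMapsStatus::kBailout:
        return Action::kBailoutDeopt;         // unseen map: drop to the interpreter once, keep the code
      case DynamicCheckMapsStatus::kDeopt:
        return Action::kEagerDeopt;           // handler mismatch: throw the optimized code away
    }
    return Action::kEagerDeopt;  // unreachable for valid statuses
  }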

View File

@ -4988,10 +4988,6 @@ void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
}
void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
}
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@ -5169,74 +5165,6 @@ void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
Generate_BaselineOrInterpreterEntry(masm, false, true);
}
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
Generate_DynamicCheckMapsTrampoline<DynamicCheckMapsDescriptor>(
masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMaps));
}
void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
MacroAssembler* masm) {
Generate_DynamicCheckMapsTrampoline<
DynamicCheckMapsWithFeedbackVectorDescriptor>(
masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMapsWithFeedbackVector));
}
template <class Descriptor>
void Builtins::Generate_DynamicCheckMapsTrampoline(
MacroAssembler* masm, Handle<CodeT> builtin_target) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
// Only save the registers that the DynamicCheckMaps builtin can clobber.
Descriptor descriptor;
RegList registers = descriptor.allocatable_registers();
// If FLAG_debug_code is enabled, CSA checks will call a C function, so we need
// to save all CallerSaved registers too.
if (FLAG_debug_code) registers |= kCallerSaved;
__ MaybeSaveRegisters(registers);
// Load the immediate arguments from the deopt exit to pass to the builtin.
Register slot_arg = descriptor.GetRegisterParameter(Descriptor::kSlot);
Register handler_arg = descriptor.GetRegisterParameter(Descriptor::kHandler);
__ movq(handler_arg, Operand(rbp, CommonFrameConstants::kCallerPCOffset));
__ movq(slot_arg, Operand(handler_arg,
Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
__ movq(
handler_arg,
Operand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
__ Call(builtin_target, RelocInfo::CODE_TARGET);
Label deopt, bailout;
__ cmpq(rax, Immediate(static_cast<int>(DynamicCheckMapsStatus::kSuccess)));
__ j(not_equal, &deopt);
__ MaybeRestoreRegisters(registers);
__ LeaveFrame(StackFrame::INTERNAL);
__ Ret();
__ bind(&deopt);
__ cmpq(rax, Immediate(static_cast<int>(DynamicCheckMapsStatus::kBailout)));
__ j(equal, &bailout);
if (FLAG_debug_code) {
__ cmpq(rax, Immediate(static_cast<int>(DynamicCheckMapsStatus::kDeopt)));
__ Assert(equal, AbortReason::kUnexpectedDynamicCheckMapsStatus);
}
__ MaybeRestoreRegisters(registers);
__ LeaveFrame(StackFrame::INTERNAL);
Handle<CodeT> deopt_eager = masm->isolate()->builtins()->code_handle(
Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
__ Jump(deopt_eager, RelocInfo::CODE_TARGET);
__ bind(&bailout);
__ MaybeRestoreRegisters(registers);
__ LeaveFrame(StackFrame::INTERNAL);
Handle<CodeT> deopt_bailout = masm->isolate()->builtins()->code_handle(
Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
__ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
}
#undef __
} // namespace internal

View File

@ -41,18 +41,6 @@ constexpr auto WriteBarrierDescriptor::registers() {
return RegisterArray(r1, r5, r4, r2, r0, r3, kContextRegister);
}
// static
constexpr auto DynamicCheckMapsDescriptor::registers() {
STATIC_ASSERT(kReturnRegister0 == r0);
return RegisterArray(r0, r1, r2, r3, cp);
}
// static
constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
STATIC_ASSERT(kReturnRegister0 == r0);
return RegisterArray(r0, r1, r2, r3, cp);
}
// static
constexpr Register LoadDescriptor::ReceiverRegister() { return r1; }
// static

View File

@ -2652,12 +2652,6 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
if (kind == DeoptimizeKind::kEagerWithResume) {
b(ret);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
Deoptimizer::kEagerWithResumeBeforeArgsSize);
}
// The above code must not emit constants either.
DCHECK(!has_pending_constants());
}

View File

@ -42,18 +42,6 @@ constexpr auto WriteBarrierDescriptor::registers() {
return RegisterArray(x1, x5, x4, x2, x0, x3, kContextRegister, x7);
}
// static
constexpr auto DynamicCheckMapsDescriptor::registers() {
STATIC_ASSERT(kReturnRegister0 == x0);
return RegisterArray(x0, x1, x2, x3, cp);
}
// static
constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
STATIC_ASSERT(kReturnRegister0 == x0);
return RegisterArray(x0, x1, x2, x3, cp);
}
// static
constexpr Register LoadDescriptor::ReceiverRegister() { return x1; }
// static

View File

@ -2222,12 +2222,6 @@ void TurboAssembler::CallForDeoptimization(
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
if (kind == DeoptimizeKind::kEagerWithResume) {
b(ret);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
Deoptimizer::kEagerWithResumeBeforeArgsSize);
}
}
void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {

View File

@ -10,85 +10,84 @@
namespace v8 {
namespace internal {
#define ABORT_MESSAGES_LIST(V) \
V(kNoReason, "no reason") \
\
V(k32BitValueInRegisterIsNotZeroExtended, \
"32 bit value in register is not zero-extended") \
V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
V(kAllocatingNonEmptyPackedArray, "Allocating non-empty packed array") \
V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned") \
V(kExpectedOptimizationSentinel, \
"Expected optimized code cell or optimization sentinel") \
V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \
V(kExpectedFeedbackVector, "Expected feedback vector") \
V(kExpectedBaselineData, "Expected baseline data") \
V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, \
"The function_data field should be a BytecodeArray on interpreter entry") \
V(kInputStringTooLong, "Input string too long") \
V(kInvalidBytecode, "Invalid bytecode") \
V(kInvalidBytecodeAdvance, "Cannot advance current bytecode, ") \
V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
V(kInvalidJumpTableIndex, "Invalid jump table index") \
V(kInvalidParametersAndRegistersInGenerator, \
"invalid parameters and registers in generator") \
V(kMissingBytecodeArray, "Missing bytecode array from function") \
V(kObjectNotTagged, "The object is not tagged") \
V(kObjectTagged, "The object is tagged") \
V(kOffsetOutOfRange, "Offset out of range") \
V(kOperandIsASmi, "Operand is a smi") \
V(kOperandIsASmiAndNotABoundFunction, \
"Operand is a smi and not a bound function") \
V(kOperandIsASmiAndNotAConstructor, \
"Operand is a smi and not a constructor") \
V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function") \
V(kOperandIsASmiAndNotAGeneratorObject, \
"Operand is a smi and not a generator object") \
V(kOperandIsNotABoundFunction, "Operand is not a bound function") \
V(kOperandIsNotAConstructor, "Operand is not a constructor") \
V(kOperandIsNotAFixedArray, "Operand is not a fixed array") \
V(kOperandIsNotAFunction, "Operand is not a function") \
V(kOperandIsNotACallableFunction, "Operand is not a callable function") \
V(kOperandIsNotAGeneratorObject, "Operand is not a generator object") \
V(kOperandIsNotACodeT, "Operand is not a CodeT") \
V(kOperandIsNotASmi, "Operand is not a smi") \
V(kPromiseAlreadySettled, "Promise already settled") \
V(kReceivedInvalidReturnAddress, "Received invalid return address") \
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
V(kShouldNotDirectlyEnterOsrFunction, \
"Should not directly enter OSR-compiled function") \
V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
V(kUnexpectedAdditionalPopValue, "Unexpected additional pop value") \
V(kUnexpectedDynamicCheckMapsStatus, "Unexpected dynamic map checks status") \
V(kUnexpectedElementsKindInArrayConstructor, \
"Unexpected ElementsKind in array constructor") \
V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
V(kUnexpectedFunctionIDForInvokeIntrinsic, \
"Unexpected runtime function id for the InvokeIntrinsic bytecode") \
V(kUnexpectedInitialMapForArrayFunction, \
"Unexpected initial map for Array function") \
V(kUnexpectedLevelAfterReturnFromApiCall, \
"Unexpected level after return from api call") \
V(kUnexpectedNegativeValue, "Unexpected negative value") \
V(kUnexpectedReturnFromFrameDropper, \
"Unexpectedly returned from dropping frames") \
V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw") \
V(kUnexpectedReturnFromWasmTrap, \
"Should not return after throwing a wasm trap") \
V(kUnexpectedStackPointer, "The stack pointer is not the expected value") \
V(kUnexpectedValue, "Unexpected value") \
V(kUnsupportedModuleOperation, "Unsupported module operation") \
V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare") \
V(kWrongAddressOrValuePassedToRecordWrite, \
"Wrong address or value passed to RecordWrite") \
V(kWrongArgumentCountForInvokeIntrinsic, \
"Wrong number of arguments for intrinsic") \
V(kWrongFunctionCodeStart, "Wrong value in code start register passed") \
V(kWrongFunctionContext, "Wrong context passed to function") \
V(kUnexpectedThreadInWasmSet, "thread_in_wasm flag was already set") \
#define ABORT_MESSAGES_LIST(V) \
V(kNoReason, "no reason") \
\
V(k32BitValueInRegisterIsNotZeroExtended, \
"32 bit value in register is not zero-extended") \
V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
V(kAllocatingNonEmptyPackedArray, "Allocating non-empty packed array") \
V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned") \
V(kExpectedOptimizationSentinel, \
"Expected optimized code cell or optimization sentinel") \
V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \
V(kExpectedFeedbackVector, "Expected feedback vector") \
V(kExpectedBaselineData, "Expected baseline data") \
V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, \
"The function_data field should be a BytecodeArray on interpreter entry") \
V(kInputStringTooLong, "Input string too long") \
V(kInvalidBytecode, "Invalid bytecode") \
V(kInvalidBytecodeAdvance, "Cannot advance current bytecode, ") \
V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
V(kInvalidJumpTableIndex, "Invalid jump table index") \
V(kInvalidParametersAndRegistersInGenerator, \
"invalid parameters and registers in generator") \
V(kMissingBytecodeArray, "Missing bytecode array from function") \
V(kObjectNotTagged, "The object is not tagged") \
V(kObjectTagged, "The object is tagged") \
V(kOffsetOutOfRange, "Offset out of range") \
V(kOperandIsASmi, "Operand is a smi") \
V(kOperandIsASmiAndNotABoundFunction, \
"Operand is a smi and not a bound function") \
V(kOperandIsASmiAndNotAConstructor, \
"Operand is a smi and not a constructor") \
V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function") \
V(kOperandIsASmiAndNotAGeneratorObject, \
"Operand is a smi and not a generator object") \
V(kOperandIsNotABoundFunction, "Operand is not a bound function") \
V(kOperandIsNotAConstructor, "Operand is not a constructor") \
V(kOperandIsNotAFixedArray, "Operand is not a fixed array") \
V(kOperandIsNotAFunction, "Operand is not a function") \
V(kOperandIsNotACallableFunction, "Operand is not a callable function") \
V(kOperandIsNotAGeneratorObject, "Operand is not a generator object") \
V(kOperandIsNotACodeT, "Operand is not a CodeT") \
V(kOperandIsNotASmi, "Operand is not a smi") \
V(kPromiseAlreadySettled, "Promise already settled") \
V(kReceivedInvalidReturnAddress, "Received invalid return address") \
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
V(kShouldNotDirectlyEnterOsrFunction, \
"Should not directly enter OSR-compiled function") \
V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
V(kUnexpectedAdditionalPopValue, "Unexpected additional pop value") \
V(kUnexpectedElementsKindInArrayConstructor, \
"Unexpected ElementsKind in array constructor") \
V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
V(kUnexpectedFunctionIDForInvokeIntrinsic, \
"Unexpected runtime function id for the InvokeIntrinsic bytecode") \
V(kUnexpectedInitialMapForArrayFunction, \
"Unexpected initial map for Array function") \
V(kUnexpectedLevelAfterReturnFromApiCall, \
"Unexpected level after return from api call") \
V(kUnexpectedNegativeValue, "Unexpected negative value") \
V(kUnexpectedReturnFromFrameDropper, \
"Unexpectedly returned from dropping frames") \
V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw") \
V(kUnexpectedReturnFromWasmTrap, \
"Should not return after throwing a wasm trap") \
V(kUnexpectedStackPointer, "The stack pointer is not the expected value") \
V(kUnexpectedValue, "Unexpected value") \
V(kUnsupportedModuleOperation, "Unsupported module operation") \
V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare") \
V(kWrongAddressOrValuePassedToRecordWrite, \
"Wrong address or value passed to RecordWrite") \
V(kWrongArgumentCountForInvokeIntrinsic, \
"Wrong number of arguments for intrinsic") \
V(kWrongFunctionCodeStart, "Wrong value in code start register passed") \
V(kWrongFunctionContext, "Wrong context passed to function") \
V(kUnexpectedThreadInWasmSet, "thread_in_wasm flag was already set") \
V(kUnexpectedThreadInWasmUnset, "thread_in_wasm flag was not set")
#define BAILOUT_MESSAGES_LIST(V) \

View File

@ -35,20 +35,6 @@ constexpr auto WriteBarrierDescriptor::registers() {
return RegisterArray(edi, ecx, edx, esi, kReturnRegister0);
}
// static
constexpr auto DynamicCheckMapsDescriptor::registers() {
STATIC_ASSERT(esi == kContextRegister);
STATIC_ASSERT(eax == kReturnRegister0);
return RegisterArray(eax, ecx, edx, edi, esi);
}
// static
constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
STATIC_ASSERT(esi == kContextRegister);
STATIC_ASSERT(eax == kReturnRegister0);
return RegisterArray(eax, ecx, edx, edi, esi);
}
// static
constexpr Register LoadDescriptor::ReceiverRegister() { return edx; }
// static

View File

@ -2036,16 +2036,6 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
if (kind == DeoptimizeKind::kEagerWithResume) {
bool old_predictable_code_size = predictable_code_size();
set_predictable_code_size(true);
jmp(ret);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
Deoptimizer::kEagerWithResumeBeforeArgsSize);
set_predictable_code_size(old_predictable_code_size);
}
}
void TurboAssembler::Trap() { int3(); }

View File

@ -69,8 +69,6 @@ namespace internal {
V(ConstructWithSpread_WithFeedback) \
V(ContextOnly) \
V(CppBuiltinAdaptor) \
V(DynamicCheckMaps) \
V(DynamicCheckMapsWithFeedbackVector) \
V(FastNewObject) \
V(ForInPrepare) \
V(GetIteratorStackParameter) \
@ -1065,39 +1063,6 @@ class LoadGlobalWithVectorDescriptor
static constexpr auto registers();
};
class DynamicCheckMapsDescriptor final
: public StaticCallInterfaceDescriptor<DynamicCheckMapsDescriptor> {
public:
DEFINE_PARAMETERS(kMap, kSlot, kHandler)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Int32(), // return val
MachineType::TaggedPointer(), // kMap
MachineType::IntPtr(), // kSlot
MachineType::TaggedSigned()) // kHandler
DECLARE_DESCRIPTOR(DynamicCheckMapsDescriptor)
static constexpr auto registers();
static constexpr bool kRestrictAllocatableRegisters = true;
};
class DynamicCheckMapsWithFeedbackVectorDescriptor final
: public StaticCallInterfaceDescriptor<
DynamicCheckMapsWithFeedbackVectorDescriptor> {
public:
DEFINE_PARAMETERS(kMap, kFeedbackVector, kSlot, kHandler)
DEFINE_RESULT_AND_PARAMETER_TYPES(
MachineType::Int32(), // return val
MachineType::TaggedPointer(), // kMap
MachineType::TaggedPointer(), // kFeedbackVector
MachineType::IntPtr(), // kSlot
MachineType::TaggedSigned()) // kHandler
DECLARE_DESCRIPTOR(DynamicCheckMapsWithFeedbackVectorDescriptor)
static constexpr auto registers();
static constexpr bool kRestrictAllocatableRegisters = true;
};
class FastNewObjectDescriptor
: public StaticCallInterfaceDescriptor<FastNewObjectDescriptor> {
public:

View File

@ -53,30 +53,6 @@ constexpr auto TSANLoadDescriptor::registers() {
}
#endif // V8_IS_TSAN
// static
constexpr auto DynamicCheckMapsDescriptor::registers() {
#if V8_TARGET_OS_WIN
return RegisterArray(kReturnRegister0, arg_reg_1, arg_reg_2, arg_reg_3,
kRuntimeCallFunctionRegister, kContextRegister);
#else
STATIC_ASSERT(kContextRegister == arg_reg_2);
return RegisterArray(kReturnRegister0, arg_reg_1, arg_reg_2, arg_reg_3,
kRuntimeCallFunctionRegister);
#endif // V8_TARGET_OS_WIN
}
// static
constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
#if V8_TARGET_OS_WIN
return RegisterArray(kReturnRegister0, arg_reg_1, arg_reg_2, arg_reg_3,
kRuntimeCallFunctionRegister, kContextRegister);
#else
STATIC_ASSERT(kContextRegister == arg_reg_2);
return RegisterArray(kReturnRegister0, arg_reg_1, arg_reg_2, arg_reg_3,
kRuntimeCallFunctionRegister);
#endif // V8_TARGET_OS_WIN
}
// static
constexpr Register LoadDescriptor::ReceiverRegister() { return rdx; }
// static

View File

@ -3111,16 +3111,6 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
if (kind == DeoptimizeKind::kEagerWithResume) {
bool old_predictable_code_size = predictable_code_size();
set_predictable_code_size(true);
jmp(ret);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
Deoptimizer::kEagerWithResumeBeforeArgsSize);
set_predictable_code_size(old_predictable_code_size);
}
}
void TurboAssembler::Trap() { int3(); }

View File

@ -520,21 +520,13 @@ constexpr int kNoDeoptimizationId = -1;
// code is executed.
// - Soft: similar to lazy deoptimization, but does not contribute to the
// total deopt count which can lead to disabling optimization for a function.
// - Bailout: a check failed in the optimized code, but instead of
// deoptimizing we try to heal the feedback and rerun the optimized
// code.
// - EagerWithResume: a check failed in the optimized code, but we can execute
// a more expensive check in a builtin that might either result in us resuming
// execution in the optimized code, or deoptimizing immediately.
enum class DeoptimizeKind : uint8_t {
kEager,
kSoft,
kBailout,
kLazy,
kEagerWithResume,
};
constexpr DeoptimizeKind kFirstDeoptimizeKind = DeoptimizeKind::kEager;
constexpr DeoptimizeKind kLastDeoptimizeKind = DeoptimizeKind::kEagerWithResume;
constexpr DeoptimizeKind kLastDeoptimizeKind = DeoptimizeKind::kLazy;
STATIC_ASSERT(static_cast<int>(kFirstDeoptimizeKind) == 0);
constexpr int kDeoptimizeKindCount = static_cast<int>(kLastDeoptimizeKind) + 1;
inline size_t hash_value(DeoptimizeKind kind) {
@ -548,10 +540,6 @@ inline std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
return os << "Soft";
case DeoptimizeKind::kLazy:
return os << "Lazy";
case DeoptimizeKind::kBailout:
return os << "Bailout";
case DeoptimizeKind::kEagerWithResume:
return os << "EagerMaybeResume";
}
}
@ -1818,12 +1806,6 @@ enum class TraceRetainingPathMode { kEnabled, kDisabled };
// can be used in Torque.
enum class VariableAllocationInfo { NONE, STACK, CONTEXT, UNUSED };
enum class DynamicCheckMapsStatus : uint8_t {
kSuccess = 0,
kBailout = 1,
kDeopt = 2
};
#ifdef V8_COMPRESS_POINTERS
class PtrComprCageBase {
public:

View File

@ -178,20 +178,6 @@ PropertyAccessInfo PropertyAccessInfo::DictionaryProtoAccessorConstant(
constant, property_name, {{receiver_map}, zone});
}
// static
MinimorphicLoadPropertyAccessInfo MinimorphicLoadPropertyAccessInfo::DataField(
int offset, bool is_inobject, Representation field_representation,
Type field_type) {
return MinimorphicLoadPropertyAccessInfo(kDataField, offset, is_inobject,
field_representation, field_type);
}
// static
MinimorphicLoadPropertyAccessInfo MinimorphicLoadPropertyAccessInfo::Invalid() {
return MinimorphicLoadPropertyAccessInfo(
kInvalid, -1, false, Representation::None(), Type::None());
}
PropertyAccessInfo::PropertyAccessInfo(Zone* zone)
: kind_(kInvalid),
lookup_start_object_maps_(zone),
@ -262,15 +248,6 @@ PropertyAccessInfo::PropertyAccessInfo(
dictionary_index_(dictionary_index),
name_{name} {}
MinimorphicLoadPropertyAccessInfo::MinimorphicLoadPropertyAccessInfo(
Kind kind, int offset, bool is_inobject,
Representation field_representation, Type field_type)
: kind_(kind),
is_inobject_(is_inobject),
offset_(offset),
field_representation_(field_representation),
field_type_(field_type) {}
namespace {
template <class RefT>
@ -682,20 +659,6 @@ PropertyAccessInfo AccessInfoFactory::ComputeDictionaryProtoAccessInfo(
access_mode, get_accessors);
}
MinimorphicLoadPropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
MinimorphicLoadPropertyAccessFeedback const& feedback) const {
DCHECK(feedback.handler()->IsSmi());
int handler = Smi::cast(*feedback.handler()).value();
bool is_inobject = LoadHandler::IsInobjectBits::decode(handler);
bool is_double = LoadHandler::IsDoubleBits::decode(handler);
int offset = LoadHandler::FieldIndexBits::decode(handler) * kTaggedSize;
Representation field_rep =
is_double ? Representation::Double() : Representation::Tagged();
Type field_type = is_double ? Type::Number() : Type::Any();
return MinimorphicLoadPropertyAccessInfo::DataField(offset, is_inobject,
field_rep, field_type);
}
bool AccessInfoFactory::TryLoadPropertyDetails(
MapRef map, base::Optional<JSObjectRef> maybe_holder, NameRef name,
InternalIndex* index_out, PropertyDetails* details_out) const {

View File

@ -22,7 +22,6 @@ class CompilationDependencies;
class CompilationDependency;
class ElementAccessFeedback;
class JSHeapBroker;
class MinimorphicLoadPropertyAccessFeedback;
class TypeCache;
struct ConstFieldInfo;
@ -214,36 +213,6 @@ class PropertyAccessInfo final {
base::Optional<NameRef> name_;
};
// This class encapsulates information required to generate load properties
// by only using the information from handlers. This information is used with
// dynamic map checks.
class MinimorphicLoadPropertyAccessInfo final {
public:
enum Kind { kInvalid, kDataField };
static MinimorphicLoadPropertyAccessInfo DataField(
int offset, bool is_inobject, Representation field_representation,
Type field_type);
static MinimorphicLoadPropertyAccessInfo Invalid();
bool IsInvalid() const { return kind_ == kInvalid; }
bool IsDataField() const { return kind_ == kDataField; }
int offset() const { return offset_; }
int is_inobject() const { return is_inobject_; }
Type field_type() const { return field_type_; }
Representation field_representation() const { return field_representation_; }
private:
MinimorphicLoadPropertyAccessInfo(Kind kind, int offset, bool is_inobject,
Representation field_representation,
Type field_type);
Kind kind_;
bool is_inobject_;
int offset_;
Representation field_representation_;
Type field_type_;
};
// Factory class for {ElementAccessInfo}s and {PropertyAccessInfo}s.
class AccessInfoFactory final {
public:
@ -264,9 +233,6 @@ class AccessInfoFactory final {
InternalIndex dict_index, AccessMode access_mode,
PropertyDetails details) const;
MinimorphicLoadPropertyAccessInfo ComputePropertyAccessInfo(
MinimorphicLoadPropertyAccessFeedback const& feedback) const;
// Merge as many of the given {infos} as possible and record any dependencies.
// Return false iff any of them was invalid, in which case no dependencies are
// recorded.

View File

@ -3298,24 +3298,19 @@ void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); }
void CodeGenerator::PrepareForDeoptimizationExits(
ZoneDeque<DeoptimizationExit*>* exits) {
__ ForceConstantPoolEmissionWithoutJump();
// We are conservative here, assuming all deopts are eager with resume deopts.
DCHECK_GE(Deoptimizer::kEagerWithResumeDeoptExitSize,
Deoptimizer::kLazyDeoptExitSize);
// We are conservative here, reserving sufficient space for the largest deopt
// kind.
DCHECK_GE(Deoptimizer::kLazyDeoptExitSize,
Deoptimizer::kNonLazyDeoptExitSize);
__ CheckVeneerPool(false, false,
static_cast<int>(exits->size()) *
Deoptimizer::kEagerWithResumeDeoptExitSize);
__ CheckVeneerPool(
false, false,
static_cast<int>(exits->size()) * Deoptimizer::kLazyDeoptExitSize);
// Check which deopt kinds exist in this Code object, to avoid emitting jumps
// to unused entries.
bool saw_deopt_kind[kDeoptimizeKindCount] = {false};
bool saw_deopt_with_resume_reason[kDeoptimizeReasonCount] = {false};
for (auto exit : *exits) {
saw_deopt_kind[static_cast<int>(exit->kind())] = true;
if (exit->kind() == DeoptimizeKind::kEagerWithResume) {
saw_deopt_with_resume_reason[static_cast<int>(exit->reason())] = true;
}
}
// Emit the jumps to deoptimization entries.
@ -3325,21 +3320,9 @@ void CodeGenerator::PrepareForDeoptimizationExits(
for (int i = 0; i < kDeoptimizeKindCount; i++) {
if (!saw_deopt_kind[i]) continue;
DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
if (kind == DeoptimizeKind::kEagerWithResume) {
for (int j = 0; j < kDeoptimizeReasonCount; j++) {
if (!saw_deopt_with_resume_reason[j]) continue;
DeoptimizeReason reason = static_cast<DeoptimizeReason>(j);
__ bind(&jump_deoptimization_or_resume_entry_labels_[j]);
__ LoadEntryFromBuiltin(Deoptimizer::GetDeoptWithResumeBuiltin(reason),
scratch);
__ Jump(scratch);
}
} else {
__ bind(&jump_deoptimization_entry_labels_[i]);
__ LoadEntryFromBuiltin(Deoptimizer::GetDeoptimizationEntry(kind),
scratch);
__ Jump(scratch);
}
__ bind(&jump_deoptimization_entry_labels_[i]);
__ LoadEntryFromBuiltin(Deoptimizer::GetDeoptimizationEntry(kind), scratch);
__ Jump(scratch);
}
}

View File

@ -151,58 +151,6 @@ uint32_t CodeGenerator::GetStackCheckOffset() {
return std::max(frame_height_delta, max_pushed_argument_bytes);
}
void CodeGenerator::AssembleDeoptImmediateArgs(
const ZoneVector<ImmediateOperand*>* immediate_args, Label* deopt_exit) {
// EagerWithResume deopts should have immediate args, and to ensure fixed
// deopt exit sizes, currently always have two immediate arguments in the
// deopt exit.
constexpr int kImmediateArgCount = 2;
DCHECK_NOT_NULL(immediate_args);
DCHECK_EQ(kImmediateArgCount, immediate_args->size());
const int expected_offsets[] = {
Deoptimizer::kEagerWithResumeImmedArgs1PcOffset,
Deoptimizer::kEagerWithResumeImmedArgs2PcOffset};
for (int i = 0; i < kImmediateArgCount; i++) {
ImmediateOperand* op = immediate_args->at(i);
Constant constant = instructions()->GetImmediate(op);
DCHECK_EQ(tasm()->SizeOfCodeGeneratedSince(deopt_exit),
expected_offsets[i] + Deoptimizer::kNonLazyDeoptExitSize);
USE(expected_offsets);
switch (constant.type()) {
case Constant::kInt32:
tasm()->dp(constant.ToInt32(), RelocInfo::LITERAL_CONSTANT);
break;
#ifdef V8_TARGET_ARCH_64_BIT
case Constant::kInt64:
tasm()->dp(constant.ToInt64());
break;
#endif
case Constant::kFloat64: {
int smi;
CHECK(DoubleToSmiInteger(constant.ToFloat64().value(), &smi));
tasm()->dp(Smi::FromInt(smi).ptr(), RelocInfo::LITERAL_CONSTANT);
break;
}
case Constant::kCompressedHeapObject:
case Constant::kHeapObject:
// Emit as a DATA_EMBEDDED_OBJECT to specify that this is a raw full
// pointer that is fixed size.
tasm()->dp(constant.ToHeapObject().address(),
RelocInfo::DATA_EMBEDDED_OBJECT);
break;
default:
// Currently only Smis and Ints are supported, but other immediate
// constants can be added when required.
UNREACHABLE();
}
}
DCHECK_EQ(tasm()->SizeOfCodeGeneratedSince(deopt_exit),
Deoptimizer::kEagerWithResumeDeoptExitSize);
}
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
DeoptimizationExit* exit) {
int deoptimization_id = exit->deoptimization_id();
@ -212,15 +160,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
DeoptimizeKind deopt_kind = exit->kind();
DeoptimizeReason deoptimization_reason = exit->reason();
Label* jump_deoptimization_entry_label;
if (deopt_kind == DeoptimizeKind::kEagerWithResume) {
jump_deoptimization_entry_label =
&jump_deoptimization_or_resume_entry_labels_[static_cast<int>(
deoptimization_reason)];
} else {
jump_deoptimization_entry_label =
&jump_deoptimization_entry_labels_[static_cast<int>(deopt_kind)];
}
Label* jump_deoptimization_entry_label =
&jump_deoptimization_entry_labels_[static_cast<int>(deopt_kind)];
if (info()->source_positions()) {
tasm()->RecordDeoptReason(deoptimization_reason, exit->node_id(),
exit->pos(), deoptimization_id);
@ -230,22 +171,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
++lazy_deopt_count_;
tasm()->BindExceptionHandler(exit->label());
} else {
if (deopt_kind != DeoptimizeKind::kEagerWithResume) {
++eager_soft_and_bailout_deopt_count_;
}
++non_lazy_deopt_count_;
tasm()->bind(exit->label());
}
Builtin target =
deopt_kind == DeoptimizeKind::kEagerWithResume
? Deoptimizer::GetDeoptWithResumeBuiltin(deoptimization_reason)
: Deoptimizer::GetDeoptimizationEntry(deopt_kind);
Builtin target = Deoptimizer::GetDeoptimizationEntry(deopt_kind);
tasm()->CallForDeoptimization(target, deoptimization_id, exit->label(),
deopt_kind, exit->continue_label(),
jump_deoptimization_entry_label);
if (deopt_kind == DeoptimizeKind::kEagerWithResume) {
AssembleDeoptImmediateArgs(exit->immediate_args(), exit->label());
}
exit->set_emitted();
return kSuccess;
@ -407,12 +340,10 @@ void CodeGenerator::AssembleCode() {
// lazy deopts and eagerwithresume might need additional instructions.
auto cmp = [](const DeoptimizationExit* a, const DeoptimizationExit* b) {
// The deoptimization exits are sorted so that lazy deopt exits appear after
// eager deopts, and eager with resume deopts appear last.
static_assert(DeoptimizeKind::kEagerWithResume == kLastDeoptimizeKind,
"eager with resume deopts are expected to be emitted last");
// eager deopts.
static_assert(static_cast<int>(DeoptimizeKind::kLazy) ==
static_cast<int>(kLastDeoptimizeKind) - 1,
"lazy deopts are expected to be emitted second from last");
static_cast<int>(kLastDeoptimizeKind),
"lazy deopts are expected to be emitted last");
if (a->kind() != b->kind()) {
return a->kind() < b->kind();
}
@ -975,8 +906,7 @@ Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() {
data->SetOptimizationId(Smi::FromInt(info->optimization_id()));
data->SetDeoptExitStart(Smi::FromInt(deopt_exit_start_offset_));
data->SetEagerSoftAndBailoutDeoptCount(
Smi::FromInt(eager_soft_and_bailout_deopt_count_));
data->SetNonLazyDeoptCount(Smi::FromInt(non_lazy_deopt_count_));
data->SetLazyDeoptCount(Smi::FromInt(lazy_deopt_count_));
if (info->has_shared_info()) {

View File

@ -236,9 +236,6 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
CodeGenResult AssembleDeoptimizerCall(DeoptimizationExit* exit);
void AssembleDeoptImmediateArgs(
const ZoneVector<ImmediateOperand*>* immediate_args, Label* deopt_exit);
// ===========================================================================
// ============= Architecture-specific code generation methods. ==============
// ===========================================================================
@ -424,7 +421,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
ZoneVector<HandlerInfo> handlers_;
int next_deoptimization_id_ = 0;
int deopt_exit_start_offset_ = 0;
int eager_soft_and_bailout_deopt_count_ = 0;
int non_lazy_deopt_count_ = 0;
int lazy_deopt_count_ = 0;
ZoneDeque<DeoptimizationExit*> deoptimization_exits_;
ZoneDeque<DeoptimizationLiteral> deoptimization_literals_;
@ -440,7 +437,6 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
// per Code object. All deopt exits can then near-call to this label. Note:
// not used on all architectures.
Label jump_deoptimization_entry_labels_[kDeoptimizeKindCount];
Label jump_deoptimization_or_resume_entry_labels_[kDeoptimizeReasonCount];
// The maximal combined height of all frames produced upon deoptimization, and
// the maximal number of pushed arguments for function calls. Applied as an

View File

@ -1456,8 +1456,6 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitDeoptimizeIf(node);
case IrOpcode::kDeoptimizeUnless:
return VisitDeoptimizeUnless(node);
case IrOpcode::kDynamicCheckMapsWithDeoptUnless:
return VisitDynamicCheckMapsWithDeoptUnless(node);
case IrOpcode::kTrapIf:
return VisitTrapIf(node, TrapIdOf(node->op()));
case IrOpcode::kTrapUnless:
@ -3164,46 +3162,6 @@ void InstructionSelector::VisitSelect(Node* node) {
VisitWordCompareZero(node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDynamicCheckMapsWithDeoptUnless(Node* node) {
OperandGenerator g(this);
DynamicCheckMapsWithDeoptUnlessNode n(node);
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
CallDescriptor* call_descriptor;
ZoneVector<InstructionOperand> dynamic_check_args(zone());
if (p.reason() == DeoptimizeReason::kDynamicCheckMaps) {
DynamicCheckMapsDescriptor descriptor;
// Note: We use Operator::kNoDeopt here because this builtin does not lazy
// deoptimize (which is the meaning of Operator::kNoDeopt), even though it
// can eagerly deoptimize.
call_descriptor = Linkage::GetStubCallDescriptor(
zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoDeopt | Operator::kNoThrow);
dynamic_check_args.insert(
dynamic_check_args.end(),
{g.UseLocation(n.map(), call_descriptor->GetInputLocation(1)),
g.UseImmediate(n.slot()), g.UseImmediate(n.handler())});
} else {
DCHECK_EQ(p.reason(), DeoptimizeReason::kDynamicCheckMapsInlined);
DynamicCheckMapsWithFeedbackVectorDescriptor descriptor;
call_descriptor = Linkage::GetStubCallDescriptor(
zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoDeopt | Operator::kNoThrow);
dynamic_check_args.insert(
dynamic_check_args.end(),
{g.UseLocation(n.map(), call_descriptor->GetInputLocation(1)),
g.UseLocation(n.feedback_vector(),
call_descriptor->GetInputLocation(2)),
g.UseImmediate(n.slot()), g.UseImmediate(n.handler())});
}
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(),
dynamic_check_args.data(), static_cast<int>(dynamic_check_args.size()));
VisitWordCompareZero(node, n.condition(), &cont);
}
void InstructionSelector::VisitTrapIf(Node* node, TrapId trap_id) {
FlagsContinuation cont =
FlagsContinuation::ForTrap(kNotEqual, trap_id, node->InputAt(1));

View File

@ -87,8 +87,7 @@ std::ostream& operator<<(std::ostream& os, DeoptimizeParameters p) {
DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
DCHECK(op->opcode() == IrOpcode::kDeoptimize ||
op->opcode() == IrOpcode::kDeoptimizeIf ||
op->opcode() == IrOpcode::kDeoptimizeUnless ||
op->opcode() == IrOpcode::kDynamicCheckMapsWithDeoptUnless);
op->opcode() == IrOpcode::kDeoptimizeUnless);
return OpParameter<DeoptimizeParameters>(op);
}
@ -501,10 +500,6 @@ IfValueParameters const& IfValueParametersOf(const Operator* op) {
V(Eager, WrongInstanceType) \
V(Eager, WrongMap)
#define CACHED_DYNAMIC_CHECK_MAPS_LIST(V) \
V(DynamicCheckMaps) \
V(DynamicCheckMapsInlined)
#define CACHED_TRAP_IF_LIST(V) \
V(TrapDivUnrepresentable) \
V(TrapFloatUnrepresentable)
@ -735,22 +730,6 @@ struct CommonOperatorGlobalCache final {
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
template <DeoptimizeReason kReason>
struct DynamicMapCheckOperator final : Operator1<DeoptimizeParameters> {
DynamicMapCheckOperator()
: Operator1<DeoptimizeParameters>( // --
IrOpcode::kDynamicCheckMapsWithDeoptUnless, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
"DynamicCheckMapsWithDeoptUnless", // name
6, 1, 1, 0, 1, 1, // counts
DeoptimizeParameters(DeoptimizeKind::kEagerWithResume, kReason,
FeedbackSource())) {}
};
#define CACHED_DYNAMIC_CHECK_MAPS(Reason) \
DynamicMapCheckOperator<DeoptimizeReason::k##Reason> k##Reason##Operator;
CACHED_DYNAMIC_CHECK_MAPS_LIST(CACHED_DYNAMIC_CHECK_MAPS)
#undef CACHED_DYNAMIC_CHECK_MAPS
template <TrapId trap_id>
struct TrapIfOperator final : public Operator1<TrapId> {
TrapIfOperator()
@ -983,15 +962,6 @@ const Operator* CommonOperatorBuilder::DeoptimizeUnless(
parameter); // parameter
}
const Operator* CommonOperatorBuilder::DynamicCheckMapsWithDeoptUnless(
bool is_inlined_frame_state) {
if (is_inlined_frame_state) {
return &cache_.kDynamicCheckMapsInlinedOperator;
} else {
return &cache_.kDynamicCheckMapsOperator;
}
}
const Operator* CommonOperatorBuilder::TrapIf(TrapId trap_id) {
switch (trap_id) {
#define CACHED_TRAP_IF(Trap) \

View File

@ -462,10 +462,6 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
FeedbackSource const& feedback);
const Operator* DeoptimizeUnless(DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback);
// DynamicCheckMapsWithDeoptUnless will call the dynamic map check builtin if
// the condition is false, which may then either deoptimize or resume
// execution.
const Operator* DynamicCheckMapsWithDeoptUnless(bool is_inlined_frame_state);
const Operator* TrapIf(TrapId trap_id);
const Operator* TrapUnless(TrapId trap_id);
const Operator* Return(int value_input_count = 1);
@ -730,27 +726,6 @@ class StartNode final : public CommonNodeWrapperBase {
int LastOutputIndex() const { return ContextOutputIndex(); }
};
class DynamicCheckMapsWithDeoptUnlessNode final : public CommonNodeWrapperBase {
public:
explicit constexpr DynamicCheckMapsWithDeoptUnlessNode(Node* node)
: CommonNodeWrapperBase(node) {
DCHECK_EQ(IrOpcode::kDynamicCheckMapsWithDeoptUnless, node->opcode());
}
#define INPUTS(V) \
V(Condition, condition, 0, BoolT) \
V(Slot, slot, 1, IntPtrT) \
V(Map, map, 2, Map) \
V(Handler, handler, 3, Object) \
V(FeedbackVector, feedback_vector, 4, FeedbackVector)
INPUTS(DEFINE_INPUT_ACCESSORS)
#undef INPUTS
FrameState frame_state() {
return FrameState{NodeProperties::GetValueInput(node(), 5)};
}
};
#undef DEFINE_INPUT_ACCESSORS
} // namespace compiler

View File

@ -80,7 +80,6 @@ class EffectControlLinearizer {
Node* LowerChangeTaggedToTaggedSigned(Node* node);
Node* LowerCheckInternalizedString(Node* node, Node* frame_state);
void LowerCheckMaps(Node* node, Node* frame_state);
void LowerDynamicCheckMaps(Node* node, Node* frame_state);
Node* LowerCompareMaps(Node* node);
Node* LowerCheckNumber(Node* node, Node* frame_state);
Node* LowerCheckClosure(Node* node, Node* frame_state);
@ -980,9 +979,6 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckMaps:
LowerCheckMaps(node, frame_state);
break;
case IrOpcode::kDynamicCheckMaps:
LowerDynamicCheckMaps(node, frame_state);
break;
case IrOpcode::kCompareMaps:
result = LowerCompareMaps(node);
break;
@ -1933,56 +1929,6 @@ void EffectControlLinearizer::TryMigrateInstance(Node* value, Node* value_map) {
__ Bind(&done);
}
void EffectControlLinearizer::LowerDynamicCheckMaps(Node* node,
Node* frame_state_node) {
DynamicCheckMapsParameters const& p =
DynamicCheckMapsParametersOf(node->op());
FrameState frame_state(frame_state_node);
Node* value = node->InputAt(0);
FeedbackSource const& feedback = p.feedback();
Node* feedback_vector = __ HeapConstant(feedback.vector);
Node* slot_index = __ IntPtrConstant(feedback.index());
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* actual_handler =
p.handler()->IsSmi()
? __ SmiConstant(Smi::ToInt(*p.handler()))
: __ HeapConstant(Handle<HeapObject>::cast(p.handler()));
auto done = __ MakeLabel();
ZoneHandleSet<Map> maps = p.maps();
size_t const map_count = maps.size();
for (size_t i = 0; i < map_count; ++i) {
Node* map = __ HeapConstant(maps[i]);
Node* check = __ TaggedEqual(value_map, map);
if (i == map_count - 1) {
if (p.flags() & CheckMapsFlag::kTryMigrateInstance) {
auto migrate = __ MakeDeferredLabel();
__ BranchWithCriticalSafetyCheck(check, &done, &migrate);
__ Bind(&migrate);
TryMigrateInstance(value, value_map);
// Reload the current map of the {value} before performing the dynamic
// map check.
value_map = __ LoadField(AccessBuilder::ForMap(), value);
}
__ DynamicCheckMapsWithDeoptUnless(check, slot_index, value_map,
actual_handler, feedback_vector,
frame_state);
__ Goto(&done);
} else {
auto next_map = __ MakeLabel();
__ BranchWithCriticalSafetyCheck(check, &done, &next_map);
__ Bind(&next_map);
}
}
__ Bind(&done);
}
Node* EffectControlLinearizer::LowerCompareMaps(Node* node) {
ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op());
size_t const map_count = maps.size();


@ -535,18 +535,6 @@ Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeReason reason,
frame_state);
}
Node* GraphAssembler::DynamicCheckMapsWithDeoptUnless(Node* condition,
Node* slot_index,
Node* value, Node* map,
Node* feedback_vector,
FrameState frame_state) {
return AddNode(graph()->NewNode(
common()->DynamicCheckMapsWithDeoptUnless(
frame_state.outer_frame_state()->opcode() == IrOpcode::kFrameState),
condition, slot_index, value, map, feedback_vector, frame_state, effect(),
control()));
}
TNode<Object> GraphAssembler::Call(const CallDescriptor* call_descriptor,
int inputs_size, Node** inputs) {
return Call(common()->Call(call_descriptor), inputs_size, inputs);


@ -333,10 +333,6 @@ class V8_EXPORT_PRIVATE GraphAssembler {
Node* frame_state);
Node* DeoptimizeIfNot(DeoptimizeReason reason, FeedbackSource const& feedback,
Node* condition, Node* frame_state);
Node* DynamicCheckMapsWithDeoptUnless(Node* condition, Node* slot_index,
Node* map, Node* handler,
Node* feedback_vector,
FrameState frame_state);
TNode<Object> Call(const CallDescriptor* call_descriptor, int inputs_size,
Node** inputs);
TNode<Object> Call(const Operator* op, int inputs_size, Node** inputs);


@ -250,11 +250,6 @@ bool ShouldUseMegamorphicLoadBuiltin(FeedbackSource const& source,
return feedback.AsNamedAccess().maps().empty();
} else if (feedback.kind() == ProcessedFeedback::kInsufficient) {
return false;
} else if (feedback.kind() == ProcessedFeedback::kMinimorphicPropertyAccess) {
// MinimorphicPropertyAccess is used for dynamic map checks and the IC state
// is either monomorphic or polymorphic, so it will still benefit from
// collecting feedback; don't use the megamorphic builtin.
return false;
}
UNREACHABLE();
}


@ -49,6 +49,11 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
cage_base_(isolate),
#endif // V8_COMPRESS_POINTERS
zone_(broker_zone),
// Note that this initialization of {refs_} with the minimal initial
// capacity is redundant in the normal use case (concurrent compilation
// enabled, standard objects to be serialized), as the map is going to be
// replaced immediately with a larger-capacity one. It doesn't seem to
// affect the performance in a noticeable way though.
refs_(zone()->New<RefsMap>(kMinimalRefsBucketCount, AddressMatcher(),
zone())),
root_index_map_(isolate),
@ -56,13 +61,7 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
tracing_enabled_(tracing_enabled),
code_kind_(code_kind),
feedback_(zone()),
property_access_infos_(zone()),
minimorphic_property_access_infos_(zone()) {
// Note that this initialization of {refs_} with the minimal initial capacity
// is redundant in the normal use case (concurrent compilation enabled,
// standard objects to be serialized), as the map is going to be replaced
// immediately with a larger-capacity one. It doesn't seem to affect the
// performance in a noticeable way though.
property_access_infos_(zone()) {
TRACE(this, "Constructing heap broker");
}
@ -426,18 +425,6 @@ bool ElementAccessFeedback::HasOnlyStringMaps(JSHeapBroker* broker) const {
return true;
}
// TODO(v8:12552): Remove.
MinimorphicLoadPropertyAccessFeedback::MinimorphicLoadPropertyAccessFeedback(
NameRef const& name, FeedbackSlotKind slot_kind, Handle<Object> handler,
ZoneVector<MapRef> const& maps, bool has_migration_target_maps)
: ProcessedFeedback(kMinimorphicPropertyAccess, slot_kind),
name_(name),
handler_(handler),
maps_(maps),
has_migration_target_maps_(has_migration_target_maps) {
DCHECK(IsLoadICKind(slot_kind));
}
NamedAccessFeedback::NamedAccessFeedback(NameRef const& name,
ZoneVector<MapRef> const& maps,
FeedbackSlotKind slot_kind)
@ -909,29 +896,6 @@ PropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
return access_info;
}
// TODO(v8:12552): Remove.
MinimorphicLoadPropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
MinimorphicLoadPropertyAccessFeedback const& feedback,
FeedbackSource const& source) {
auto it = minimorphic_property_access_infos_.find(source);
if (it != minimorphic_property_access_infos_.end()) return it->second;
AccessInfoFactory factory(this, nullptr, zone());
MinimorphicLoadPropertyAccessInfo access_info =
factory.ComputePropertyAccessInfo(feedback);
// We can assume a memory fence on {source.vector} because in production,
// the vector has already passed the gc predicate. Unit tests create
// FeedbackSource objects directly from handles, but they run on
// the main thread.
TRACE(this, "Storing MinimorphicLoadPropertyAccessInfo for "
<< source.index() << " "
<< MakeRefAssumeMemoryFence<Object>(this, source.vector));
minimorphic_property_access_infos_.insert({source, access_info});
return access_info;
}
BinaryOperationFeedback const& ProcessedFeedback::AsBinaryOperation() const {
CHECK_EQ(kBinaryOperation, kind());
return *static_cast<BinaryOperationFeedback const*>(this);
@ -972,13 +936,6 @@ NamedAccessFeedback const& ProcessedFeedback::AsNamedAccess() const {
return *static_cast<NamedAccessFeedback const*>(this);
}
// TODO(v8:12552): Remove.
MinimorphicLoadPropertyAccessFeedback const&
ProcessedFeedback::AsMinimorphicPropertyAccess() const {
CHECK_EQ(kMinimorphicPropertyAccess, kind());
return *static_cast<MinimorphicLoadPropertyAccessFeedback const*>(this);
}
LiteralFeedback const& ProcessedFeedback::AsLiteral() const {
CHECK_EQ(kLiteral, kind());
return *static_cast<LiteralFeedback const*>(this);


@ -234,10 +234,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
MapRef map, NameRef name, AccessMode access_mode,
CompilationDependencies* dependencies);
MinimorphicLoadPropertyAccessInfo GetPropertyAccessInfo(
MinimorphicLoadPropertyAccessFeedback const& feedback,
FeedbackSource const& source);
StringRef GetTypedArrayStringTag(ElementsKind kind);
bool IsMainThread() const {
@ -456,9 +452,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
ZoneUnorderedMap<PropertyAccessTarget, PropertyAccessInfo,
PropertyAccessTarget::Hash, PropertyAccessTarget::Equal>
property_access_infos_;
ZoneUnorderedMap<FeedbackSource, MinimorphicLoadPropertyAccessInfo,
FeedbackSource::Hash, FeedbackSource::Equal>
minimorphic_property_access_infos_;
CompilationDependencies* dependencies_ = nullptr;


@ -1037,55 +1037,6 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
}
}
Reduction JSNativeContextSpecialization::ReduceMinimorphicPropertyAccess(
Node* node, Node* value,
MinimorphicLoadPropertyAccessFeedback const& feedback,
FeedbackSource const& source) {
DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSLoadNamedFromSuper);
STATIC_ASSERT(JSLoadNamedNode::ObjectIndex() == 0 &&
JSLoadPropertyNode::ObjectIndex() == 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* lookup_start_object;
if (node->opcode() == IrOpcode::kJSLoadNamedFromSuper) {
DCHECK(FLAG_super_ic);
JSLoadNamedFromSuperNode n(node);
// Lookup start object is the __proto__ of the home object.
lookup_start_object = effect =
BuildLoadPrototypeFromObject(n.home_object(), effect, control);
} else {
lookup_start_object = NodeProperties::GetValueInput(node, 0);
}
MinimorphicLoadPropertyAccessInfo access_info =
broker()->GetPropertyAccessInfo(feedback, source);
if (access_info.IsInvalid()) return NoChange();
PropertyAccessBuilder access_builder(jsgraph(), broker(), nullptr);
CheckMapsFlags flags = CheckMapsFlag::kNone;
if (feedback.has_migration_target_maps()) {
flags |= CheckMapsFlag::kTryMigrateInstance;
}
ZoneHandleSet<Map> maps;
for (const MapRef& map : feedback.maps()) {
maps.insert(map.object(), graph()->zone());
}
effect = graph()->NewNode(
simplified()->DynamicCheckMaps(flags, feedback.handler(), maps, source),
lookup_start_object, effect, control);
value = access_builder.BuildMinimorphicLoadDataField(
feedback.name(), access_info, lookup_start_object, &effect, &control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Node* node, Node* value, NamedAccessFeedback const& feedback,
AccessMode access_mode, Node* key) {
@ -2013,11 +1964,6 @@ Reduction JSNativeContextSpecialization::ReducePropertyAccess(
case ProcessedFeedback::kNamedAccess:
return ReduceNamedAccess(node, value, feedback.AsNamedAccess(),
access_mode, key);
case ProcessedFeedback::kMinimorphicPropertyAccess:
DCHECK_EQ(access_mode, AccessMode::kLoad);
DCHECK_NULL(key);
return ReduceMinimorphicPropertyAccess(
node, value, feedback.AsMinimorphicPropertyAccess(), source);
case ProcessedFeedback::kElementAccess:
DCHECK_EQ(feedback.AsElementAccess().keyed_mode().access_mode(),
access_mode);


@ -107,10 +107,6 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Reduction ReduceNamedAccess(Node* node, Node* value,
NamedAccessFeedback const& feedback,
AccessMode access_mode, Node* key = nullptr);
Reduction ReduceMinimorphicPropertyAccess(
Node* node, Node* value,
MinimorphicLoadPropertyAccessFeedback const& feedback,
FeedbackSource const& source);
Reduction ReduceGlobalAccess(Node* node, Node* lookup_start_object,
Node* receiver, Node* value, NameRef const& name,
AccessMode access_mode, Node* key,


@ -29,7 +29,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kDebugBreak:
case IrOpcode::kDeoptimizeIf:
case IrOpcode::kDeoptimizeUnless:
case IrOpcode::kDynamicCheckMapsWithDeoptUnless:
case IrOpcode::kEffectPhi:
case IrOpcode::kIfException:
case IrOpcode::kLoad:


@ -10,28 +10,27 @@
#include "src/common/globals.h"
// Opcodes for control operators.
#define CONTROL_OP_LIST(V) \
V(Start) \
V(Loop) \
V(Branch) \
V(Switch) \
V(IfTrue) \
V(IfFalse) \
V(IfSuccess) \
V(IfException) \
V(IfValue) \
V(IfDefault) \
V(Merge) \
V(Deoptimize) \
V(DeoptimizeIf) \
V(DeoptimizeUnless) \
V(DynamicCheckMapsWithDeoptUnless) \
V(TrapIf) \
V(TrapUnless) \
V(Return) \
V(TailCall) \
V(Terminate) \
V(Throw) \
#define CONTROL_OP_LIST(V) \
V(Start) \
V(Loop) \
V(Branch) \
V(Switch) \
V(IfTrue) \
V(IfFalse) \
V(IfSuccess) \
V(IfException) \
V(IfValue) \
V(IfDefault) \
V(Merge) \
V(Deoptimize) \
V(DeoptimizeIf) \
V(DeoptimizeUnless) \
V(TrapIf) \
V(TrapUnless) \
V(Return) \
V(TailCall) \
V(Terminate) \
V(Throw) \
V(End)
// Opcodes for constant operators.
@ -421,7 +420,6 @@
V(ConvertTaggedHoleToUndefined) \
V(DateNow) \
V(DelayedStringConstant) \
V(DynamicCheckMaps) \
V(EnsureWritableFastElements) \
V(FastApiCall) \
V(FindOrderedHashMapEntry) \


@ -20,7 +20,6 @@ class ForInFeedback;
class GlobalAccessFeedback;
class InstanceOfFeedback;
class LiteralFeedback;
class MinimorphicLoadPropertyAccessFeedback;
class NamedAccessFeedback;
class RegExpLiteralFeedback;
class TemplateObjectFeedback;
@ -37,7 +36,6 @@ class ProcessedFeedback : public ZoneObject {
kGlobalAccess,
kInstanceOf,
kLiteral,
kMinimorphicPropertyAccess,
kNamedAccess,
kRegExpLiteral,
kTemplateObject,
@ -55,8 +53,6 @@ class ProcessedFeedback : public ZoneObject {
GlobalAccessFeedback const& AsGlobalAccess() const;
InstanceOfFeedback const& AsInstanceOf() const;
NamedAccessFeedback const& AsNamedAccess() const;
MinimorphicLoadPropertyAccessFeedback const& AsMinimorphicPropertyAccess()
const;
LiteralFeedback const& AsLiteral() const;
RegExpLiteralFeedback const& AsRegExpLiteral() const;
TemplateObjectFeedback const& AsTemplateObject() const;
@ -173,27 +169,6 @@ class NamedAccessFeedback : public ProcessedFeedback {
ZoneVector<MapRef> const maps_;
};
class MinimorphicLoadPropertyAccessFeedback : public ProcessedFeedback {
public:
MinimorphicLoadPropertyAccessFeedback(NameRef const& name,
FeedbackSlotKind slot_kind,
Handle<Object> handler,
ZoneVector<MapRef> const& maps,
bool has_migration_target_maps);
NameRef const& name() const { return name_; }
bool is_monomorphic() const { return maps_.size() == 1; }
Handle<Object> handler() const { return handler_; }
ZoneVector<MapRef> const& maps() const { return maps_; }
bool has_migration_target_maps() const { return has_migration_target_maps_; }
private:
NameRef const name_;
Handle<Object> const handler_;
ZoneVector<MapRef> const maps_;
bool const has_migration_target_maps_;
};
class CallFeedback : public ProcessedFeedback {
public:
CallFeedback(base::Optional<HeapObjectRef> target, float frequency,


@ -276,26 +276,6 @@ Node* PropertyAccessBuilder::BuildLoadDataField(NameRef const& name,
return value;
}
Node* PropertyAccessBuilder::BuildMinimorphicLoadDataField(
NameRef const& name, MinimorphicLoadPropertyAccessInfo const& access_info,
Node* lookup_start_object, Node** effect, Node** control) {
DCHECK_NULL(dependencies());
MachineRepresentation const field_representation =
ConvertRepresentation(access_info.field_representation());
FieldAccess field_access = {
kTaggedBase,
access_info.offset(),
name.object(),
MaybeHandle<Map>(),
access_info.field_type(),
MachineType::TypeForRepresentation(field_representation),
kFullWriteBarrier,
ConstFieldInfo::None()};
return BuildLoadDataField(name, lookup_start_object, field_access,
access_info.is_inobject(), effect, control);
}
Node* PropertyAccessBuilder::BuildLoadDataField(
NameRef const& name, PropertyAccessInfo const& access_info,
Node* lookup_start_object, Node** effect, Node** control) {


@ -59,12 +59,6 @@ class PropertyAccessBuilder {
base::Optional<Node*> FoldLoadDictPrototypeConstant(
PropertyAccessInfo const& access_info);
// Builds the load for data-field access for minimorphic loads that use
// dynamic map checks. These cannot depend on any information from the maps.
Node* BuildMinimorphicLoadDataField(
NameRef const& name, MinimorphicLoadPropertyAccessInfo const& access_info,
Node* lookup_start_object, Node** effect, Node** control);
static MachineRepresentation ConvertRepresentation(
Representation representation);


@ -3881,11 +3881,6 @@ class RepresentationSelector {
node, UseInfo::CheckedHeapObjectAsTaggedPointer(p.feedback()),
MachineRepresentation::kNone);
}
case IrOpcode::kDynamicCheckMaps: {
return VisitUnop<T>(
node, UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()),
MachineRepresentation::kNone);
}
case IrOpcode::kTransitionElementsKind: {
return VisitUnop<T>(
node, UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()),


@ -270,36 +270,6 @@ CheckMapsParameters const& CheckMapsParametersOf(Operator const* op) {
return OpParameter<CheckMapsParameters>(op);
}
bool operator==(DynamicCheckMapsParameters const& lhs,
DynamicCheckMapsParameters const& rhs) {
// FeedbackSource is sufficient as an equality check. FeedbackSource uniquely
// determines all other properties (handler, flags, state and maps).
DCHECK_IMPLIES(lhs.feedback() == rhs.feedback(),
lhs.flags() == rhs.flags() && lhs.state() == rhs.state() &&
lhs.handler().address() == rhs.handler().address() &&
lhs.maps() == rhs.maps());
return lhs.feedback() == rhs.feedback();
}
size_t hash_value(DynamicCheckMapsParameters const& p) {
FeedbackSource::Hash feedback_hash;
// FeedbackSource is sufficient for hashing. FeedbackSource uniquely
// determines all other properties (handler, flags, state and maps).
return base::hash_combine(feedback_hash(p.feedback()));
}
std::ostream& operator<<(std::ostream& os,
DynamicCheckMapsParameters const& p) {
return os << p.handler() << ", " << p.feedback() << "," << p.state() << ","
<< p.flags() << "," << p.maps();
}
DynamicCheckMapsParameters const& DynamicCheckMapsParametersOf(
Operator const* op) {
DCHECK_EQ(IrOpcode::kDynamicCheckMaps, op->opcode());
return OpParameter<DynamicCheckMapsParameters>(op);
}
ZoneHandleSet<Map> const& CompareMapsParametersOf(Operator const* op) {
DCHECK_EQ(IrOpcode::kCompareMaps, op->opcode());
return OpParameter<ZoneHandleSet<Map>>(op);
@ -1484,18 +1454,6 @@ const Operator* SimplifiedOperatorBuilder::CheckMaps(
parameters); // parameter
}
const Operator* SimplifiedOperatorBuilder::DynamicCheckMaps(
CheckMapsFlags flags, Handle<Object> handler,
ZoneHandleSet<Map> const& maps, const FeedbackSource& feedback) {
DynamicCheckMapsParameters const parameters(flags, handler, maps, feedback);
return zone()->New<Operator1<DynamicCheckMapsParameters>>( // --
IrOpcode::kDynamicCheckMaps, // opcode
Operator::kNoThrow | Operator::kNoWrite, // flags
"DynamicCheckMaps", // name
1, 1, 1, 0, 1, 0, // counts
parameters); // parameter
}
const Operator* SimplifiedOperatorBuilder::MapGuard(ZoneHandleSet<Map> maps) {
DCHECK_LT(0, maps.size());
return zone()->New<Operator1<ZoneHandleSet<Map>>>( // --


@ -439,41 +439,6 @@ std::ostream& operator<<(std::ostream&, CheckMapsParameters const&);
CheckMapsParameters const& CheckMapsParametersOf(Operator const*)
V8_WARN_UNUSED_RESULT;
// A descriptor for dynamic map checks.
class DynamicCheckMapsParameters final {
public:
enum ICState { kMonomorphic, kPolymorphic };
DynamicCheckMapsParameters(CheckMapsFlags flags, Handle<Object> handler,
ZoneHandleSet<Map> const& maps,
const FeedbackSource& feedback)
: flags_(flags), handler_(handler), maps_(maps), feedback_(feedback) {}
CheckMapsFlags flags() const { return flags_; }
Handle<Object> handler() const { return handler_; }
ZoneHandleSet<Map> const& maps() const { return maps_; }
FeedbackSource const& feedback() const { return feedback_; }
ICState state() const {
return maps_.size() == 1 ? ICState::kMonomorphic : ICState::kPolymorphic;
}
private:
CheckMapsFlags const flags_;
Handle<Object> const handler_;
ZoneHandleSet<Map> const maps_;
FeedbackSource const feedback_;
};
bool operator==(DynamicCheckMapsParameters const&,
DynamicCheckMapsParameters const&);
size_t hash_value(DynamicCheckMapsParameters const&);
std::ostream& operator<<(std::ostream&, DynamicCheckMapsParameters const&);
DynamicCheckMapsParameters const& DynamicCheckMapsParametersOf(Operator const*)
V8_WARN_UNUSED_RESULT;
ZoneHandleSet<Map> const& MapGuardMapsOf(Operator const*) V8_WARN_UNUSED_RESULT;
// Parameters for CompareMaps operator.
@ -927,9 +892,6 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckInternalizedString();
const Operator* CheckMaps(CheckMapsFlags, ZoneHandleSet<Map>,
const FeedbackSource& = FeedbackSource());
const Operator* DynamicCheckMaps(CheckMapsFlags flags, Handle<Object> handler,
ZoneHandleSet<Map> const& maps,
const FeedbackSource& feedback);
const Operator* CheckNotTaggedHole();
const Operator* CheckNumber(const FeedbackSource& feedback);
const Operator* CheckReceiver();


@ -115,7 +115,6 @@ class Typer::Visitor : public Reducer {
DECLARE_IMPOSSIBLE_CASE(Deoptimize)
DECLARE_IMPOSSIBLE_CASE(DeoptimizeIf)
DECLARE_IMPOSSIBLE_CASE(DeoptimizeUnless)
DECLARE_IMPOSSIBLE_CASE(DynamicCheckMapsWithDeoptUnless)
DECLARE_IMPOSSIBLE_CASE(TrapIf)
DECLARE_IMPOSSIBLE_CASE(TrapUnless)
DECLARE_IMPOSSIBLE_CASE(Return)
@ -2102,7 +2101,6 @@ Type Typer::Visitor::TypeCheckInternalizedString(Node* node) {
}
Type Typer::Visitor::TypeCheckMaps(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeDynamicCheckMaps(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeCompareMaps(Node* node) { return Type::Boolean(); }


@ -370,7 +370,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
break;
case IrOpcode::kDeoptimizeIf:
case IrOpcode::kDeoptimizeUnless:
case IrOpcode::kDynamicCheckMapsWithDeoptUnless:
case IrOpcode::kPlug:
case IrOpcode::kTrapIf:
case IrOpcode::kTrapUnless:
@ -1448,10 +1447,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::Any());
CheckNotTyped(node);
break;
case IrOpcode::kDynamicCheckMaps:
CheckValueInputIs(node, 0, Type::Any());
CheckNotTyped(node);
break;
case IrOpcode::kCompareMaps:
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::Boolean());


@ -17,18 +17,11 @@ namespace internal {
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Eager);
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Lazy);
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Soft);
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Bailout);
#undef ASSERT_OFFSET
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
const int Deoptimizer::kNonLazyDeoptExitSize = 2 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 3 * kInstrSize;
const int Deoptimizer::kEagerWithResumeDeoptExitSize =
kEagerWithResumeBeforeArgsSize + 2 * kSystemPointerSize;
const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = kInstrSize;
const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset =
kInstrSize + kSystemPointerSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
const int kShift = n % 2 == 0 ? 0 : 32;


@ -16,12 +16,6 @@ const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
#else
const int Deoptimizer::kLazyDeoptExitSize = 1 * kInstrSize;
#endif
const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 2 * kInstrSize;
const int Deoptimizer::kEagerWithResumeDeoptExitSize =
kEagerWithResumeBeforeArgsSize + 2 * kSystemPointerSize;
const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = kInstrSize;
const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset =
kInstrSize + kSystemPointerSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(


@ -17,8 +17,6 @@ namespace internal {
V(CouldNotGrowElements, "failed to grow elements store") \
V(DeoptimizeNow, "%_DeoptimizeNow") \
V(DivisionByZero, "division by zero") \
V(DynamicCheckMaps, "dynamic check maps failed") \
V(DynamicCheckMapsInlined, "dynamic check maps failed") \
V(Hole, "hole") \
V(InstanceMigrationFailed, "instance migration failed") \
V(InsufficientTypeFeedbackForCall, "Insufficient type feedback for call") \


@ -468,10 +468,6 @@ const char* Deoptimizer::MessageFor(DeoptimizeKind kind) {
return "deopt-soft";
case DeoptimizeKind::kLazy:
return "deopt-lazy";
case DeoptimizeKind::kBailout:
return "bailout";
case DeoptimizeKind::kEagerWithResume:
return "eager-with-resume";
}
}
@ -541,21 +537,14 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
DeoptimizationData::cast(compiled_code_.deoptimization_data());
Address deopt_start = compiled_code_.raw_instruction_start() +
deopt_data.DeoptExitStart().value();
int eager_soft_and_bailout_deopt_count =
deopt_data.EagerSoftAndBailoutDeoptCount().value();
int non_lazy_deopt_count = deopt_data.NonLazyDeoptCount().value();
Address lazy_deopt_start =
deopt_start +
eager_soft_and_bailout_deopt_count * kNonLazyDeoptExitSize;
int lazy_deopt_count = deopt_data.LazyDeoptCount().value();
Address eager_with_resume_deopt_start =
lazy_deopt_start + lazy_deopt_count * kLazyDeoptExitSize;
deopt_start + non_lazy_deopt_count * kNonLazyDeoptExitSize;
// The deoptimization exits are sorted so that lazy deopt exits appear after
// eager deopts, and eager with resume deopts appear last.
static_assert(DeoptimizeKind::kEagerWithResume == kLastDeoptimizeKind,
"eager with resume deopts are expected to be emitted last");
// eager deopts.
static_assert(static_cast<int>(DeoptimizeKind::kLazy) ==
static_cast<int>(kLastDeoptimizeKind) - 1,
"lazy deopts are expected to be emitted second from last");
static_cast<int>(kLastDeoptimizeKind),
"lazy deopts are expected to be emitted last");
// from_ is the value of the link register after the call to the
// deoptimizer, so for the last lazy deopt, from_ points to the first
// non-lazy deopt, so we use <=, similarly for the last non-lazy deopt and
@ -565,19 +554,11 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
static_cast<int>(from_ - kNonLazyDeoptExitSize - deopt_start);
DCHECK_EQ(0, offset % kNonLazyDeoptExitSize);
deopt_exit_index_ = offset / kNonLazyDeoptExitSize;
} else if (from_ <= eager_with_resume_deopt_start) {
} else {
int offset =
static_cast<int>(from_ - kLazyDeoptExitSize - lazy_deopt_start);
DCHECK_EQ(0, offset % kLazyDeoptExitSize);
deopt_exit_index_ =
eager_soft_and_bailout_deopt_count + (offset / kLazyDeoptExitSize);
} else {
int offset = static_cast<int>(from_ - kNonLazyDeoptExitSize -
eager_with_resume_deopt_start);
DCHECK_EQ(0, offset % kEagerWithResumeDeoptExitSize);
deopt_exit_index_ = eager_soft_and_bailout_deopt_count +
lazy_deopt_count +
(offset / kEagerWithResumeDeoptExitSize);
deopt_exit_index_ = non_lazy_deopt_count + (offset / kLazyDeoptExitSize);
}
}
}
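With the bailout and eager-with-resume kinds gone, the deopt exit area of optimized code has just two fixed-size regions: non-lazy (eager and soft) exits followed by lazy exits. A minimal, self-contained sketch of the index computation this hunk implements; the sizes are placeholders (the real per-architecture constants live in the deoptimizer-<arch>.cc files) and the function name is illustrative, not V8's:
#include <cstdint>
// Sketch only: how deopt_exit_index_ falls out of the two-region layout
// [non-lazy exits | lazy exits]. `from` is the return address of the exit
// call, i.e. it points just past the exit that fired.
constexpr int kNonLazyDeoptExitSize = 4;  // placeholder, arch-dependent
constexpr int kLazyDeoptExitSize = 4;     // placeholder, arch-dependent
int DeoptExitIndex(uintptr_t from, uintptr_t deopt_start, int non_lazy_count) {
  uintptr_t lazy_start = deopt_start + non_lazy_count * kNonLazyDeoptExitSize;
  if (from <= lazy_start) {
    // Step back over the exit we returned from, then divide by the exit size.
    return static_cast<int>(from - kNonLazyDeoptExitSize - deopt_start) /
           kNonLazyDeoptExitSize;
  }
  // Lazy exits are numbered after all non-lazy exits.
  return non_lazy_count +
         static_cast<int>(from - kLazyDeoptExitSize - lazy_start) /
             kLazyDeoptExitSize;
}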
@ -617,32 +598,14 @@ void Deoptimizer::DeleteFrameDescriptions() {
#endif // DEBUG
}
Builtin Deoptimizer::GetDeoptWithResumeBuiltin(DeoptimizeReason reason) {
switch (reason) {
case DeoptimizeReason::kDynamicCheckMaps:
return Builtin::kDynamicCheckMapsTrampoline;
case DeoptimizeReason::kDynamicCheckMapsInlined:
return Builtin::kDynamicCheckMapsWithFeedbackVectorTrampoline;
default:
UNREACHABLE();
}
}
Builtin Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind kind) {
switch (kind) {
case DeoptimizeKind::kEager:
return Builtin::kDeoptimizationEntry_Eager;
case DeoptimizeKind::kSoft:
return Builtin::kDeoptimizationEntry_Soft;
case DeoptimizeKind::kBailout:
return Builtin::kDeoptimizationEntry_Bailout;
case DeoptimizeKind::kLazy:
return Builtin::kDeoptimizationEntry_Lazy;
case DeoptimizeKind::kEagerWithResume:
// EagerWithResume deopts will call a special builtin (specified by
// GetDeoptWithResumeBuiltin) which will itself select the deoptimization
// entry builtin if it decides to deopt instead of resuming execution.
UNREACHABLE();
}
}
@ -658,9 +621,6 @@ bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
case Builtin::kDeoptimizationEntry_Soft:
*type_out = DeoptimizeKind::kSoft;
return true;
case Builtin::kDeoptimizationEntry_Bailout:
*type_out = DeoptimizeKind::kBailout;
return true;
case Builtin::kDeoptimizationEntry_Lazy:
*type_out = DeoptimizeKind::kLazy;
return true;
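After this hunk only three deoptimization kinds remain, one per surviving DeoptimizationEntry builtin. An illustrative sketch of the resulting kind set (the ordering is inferred from the "lazy deopts are expected to be emitted last" assert earlier; this is not a copy of V8's actual declaration):
#include <cstdint>
// Sketch only: the deopt kinds left once kBailout and kEagerWithResume are
// removed. kLazy stays last so lazy exits can be emitted after all non-lazy
// (eager and soft) exits.
enum class DeoptimizeKind : uint8_t {
  kEager,
  kSoft,
  kLazy,
  kLastDeoptimizeKind = kLazy,
};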

View File

@ -93,11 +93,6 @@ class Deoptimizer : public Malloced {
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
// Returns the builtin that will perform a check and either eagerly deopt with
// |reason| or resume execution in the optimized code.
V8_EXPORT_PRIVATE static Builtin GetDeoptWithResumeBuiltin(
DeoptimizeReason reason);
V8_EXPORT_PRIVATE static Builtin GetDeoptimizationEntry(DeoptimizeKind kind);
// Returns true if {addr} is a deoptimization entry and stores its type in
@ -139,10 +134,6 @@ class Deoptimizer : public Malloced {
// kSupportsFixedDeoptExitSizes is true.
V8_EXPORT_PRIVATE static const int kNonLazyDeoptExitSize;
V8_EXPORT_PRIVATE static const int kLazyDeoptExitSize;
V8_EXPORT_PRIVATE static const int kEagerWithResumeBeforeArgsSize;
V8_EXPORT_PRIVATE static const int kEagerWithResumeDeoptExitSize;
V8_EXPORT_PRIVATE static const int kEagerWithResumeImmedArgs1PcOffset;
V8_EXPORT_PRIVATE static const int kEagerWithResumeImmedArgs2PcOffset;
// Tracing.
static void TraceMarkForDeoptimization(Code code, const char* reason);


@ -12,12 +12,6 @@ namespace internal {
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
const int Deoptimizer::kNonLazyDeoptExitSize = 5;
const int Deoptimizer::kLazyDeoptExitSize = 5;
const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 10;
const int Deoptimizer::kEagerWithResumeDeoptExitSize =
kEagerWithResumeBeforeArgsSize + 2 * kSystemPointerSize;
const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = 5;
const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset =
5 + kSystemPointerSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(


@ -19,17 +19,11 @@ namespace internal {
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Eager);
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Lazy);
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Soft);
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Bailout);
#undef ASSERT_OFFSET
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
const int Deoptimizer::kNonLazyDeoptExitSize = 4;
const int Deoptimizer::kLazyDeoptExitSize = 4;
const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 9;
const int Deoptimizer::kEagerWithResumeDeoptExitSize =
kEagerWithResumeBeforeArgsSize + 2 * kSystemPointerSize;
const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = 5;
const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset = 13;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(


@ -1245,7 +1245,7 @@ DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(OptimizationId, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
DEFINE_DEOPT_ELEMENT_ACCESSORS(DeoptExitStart, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(EagerSoftAndBailoutDeoptCount, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(NonLazyDeoptCount, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(LazyDeoptCount, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)


@ -1072,7 +1072,7 @@ class DeoptimizationData : public FixedArray {
static const int kSharedFunctionInfoIndex = 6;
static const int kInliningPositionsIndex = 7;
static const int kDeoptExitStartIndex = 8;
static const int kEagerSoftAndBailoutDeoptCountIndex = 9;
static const int kNonLazyDeoptCountIndex = 9;
static const int kLazyDeoptCountIndex = 10;
static const int kFirstDeoptEntryIndex = 11;
@ -1101,7 +1101,7 @@ class DeoptimizationData : public FixedArray {
DECL_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
DECL_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
DECL_ELEMENT_ACCESSORS(DeoptExitStart, Smi)
DECL_ELEMENT_ACCESSORS(EagerSoftAndBailoutDeoptCount, Smi)
DECL_ELEMENT_ACCESSORS(NonLazyDeoptCount, Smi)
DECL_ELEMENT_ACCESSORS(LazyDeoptCount, Smi)
#undef DECL_ELEMENT_ACCESSORS
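With bailout deopts gone, header slot 9 of DeoptimizationData simply counts the non-lazy (eager and soft) exits. A self-contained sketch of the fixed header indices this hunk touches, written as a standalone enum purely for illustration (the names mirror the constants shown above):
// Illustrative only: fixed header slots of DeoptimizationData as implied by
// the indices in this hunk; slot 9 was kEagerSoftAndBailoutDeoptCountIndex
// before this CL.
enum DeoptimizationDataHeaderSlot : int {
  kSharedFunctionInfoIndex = 6,
  kInliningPositionsIndex = 7,
  kDeoptExitStartIndex = 8,
  kNonLazyDeoptCountIndex = 9,
  kLazyDeoptCountIndex = 10,
  kFirstDeoptEntryIndex = 11,
};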


@ -1535,22 +1535,13 @@ TEST(DeoptExitSizeIsFixed) {
DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
Label before_exit;
masm.bind(&before_exit);
if (kind == DeoptimizeKind::kEagerWithResume) {
Builtin target = Deoptimizer::GetDeoptWithResumeBuiltin(
DeoptimizeReason::kDynamicCheckMaps);
masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
nullptr);
CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
Deoptimizer::kEagerWithResumeBeforeArgsSize);
} else {
Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
nullptr);
CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
kind == DeoptimizeKind::kLazy
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
}
Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
nullptr);
CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
kind == DeoptimizeKind::kLazy
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
}
}


@ -324,22 +324,13 @@ TEST(DeoptExitSizeIsFixed) {
DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
Label before_exit;
masm.bind(&before_exit);
if (kind == DeoptimizeKind::kEagerWithResume) {
Builtin target = Deoptimizer::GetDeoptWithResumeBuiltin(
DeoptimizeReason::kDynamicCheckMaps);
masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
nullptr);
CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
Deoptimizer::kEagerWithResumeBeforeArgsSize);
} else {
Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
nullptr);
CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
kind == DeoptimizeKind::kLazy
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
}
Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
nullptr);
CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
kind == DeoptimizeKind::kLazy
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
}
}


@ -106,30 +106,20 @@ TEST(DeoptExitSizeIsFixed) {
for (int i = 0; i < kDeoptimizeKindCount; i++) {
DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
Label before_exit;
if (kind == DeoptimizeKind::kEagerWithResume) {
masm.bind(&before_exit);
Builtin target = Deoptimizer::GetDeoptWithResumeBuiltin(
DeoptimizeReason::kDynamicCheckMaps);
masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
&before_exit);
CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
Deoptimizer::kEagerWithResumeBeforeArgsSize);
Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
// Mirroring logic in code-generator.cc.
if (kind == DeoptimizeKind::kLazy) {
// CFI emits an extra instruction here.
masm.BindExceptionHandler(&before_exit);
} else {
Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
// Mirroring logic in code-generator.cc.
if (kind == DeoptimizeKind::kLazy) {
// CFI emits an extra instruction here.
masm.BindExceptionHandler(&before_exit);
} else {
masm.bind(&before_exit);
}
masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
&before_exit);
CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
kind == DeoptimizeKind::kLazy
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
masm.bind(&before_exit);
}
masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
&before_exit);
CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
kind == DeoptimizeKind::kLazy
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
}
}


@ -1063,22 +1063,13 @@ TEST(DeoptExitSizeIsFixed) {
DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
Label before_exit;
masm.bind(&before_exit);
if (kind == DeoptimizeKind::kEagerWithResume) {
Builtin target = Deoptimizer::GetDeoptWithResumeBuiltin(
DeoptimizeReason::kDynamicCheckMaps);
masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
nullptr);
CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
Deoptimizer::kEagerWithResumeBeforeArgsSize);
} else {
Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
nullptr);
CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
kind == DeoptimizeKind::kLazy
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
}
Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
nullptr);
CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
kind == DeoptimizeKind::kLazy
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
}
}