[sparkplug][arm][arm64][ia32] Callee-saved registers for RecordWrite

Migrate the remaining architectures to the new callee save RecordWrite
approach.

Bug: v8:11420
Change-Id: I9da56cbb5bf8c6ca4bcc7c0e2a1233e2f5ef587c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2944844
Commit-Queue: Camillo Bruni <cbruni@chromium.org>
Reviewed-by: Santiago Aboy Solanes <solanes@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75075}
Camillo Bruni 2021-06-10 14:33:29 +02:00 committed by V8 LUCI CQ
parent 86952023f1
commit c5d41ae6d2
22 changed files with 80 additions and 115 deletions
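
The shape of the migration, as a hedged sketch (register names and helpers are lifted from the arm hunks below; the exact CallRecordWriteStub signature is whatever this revision defines): instead of spilling all caller-saved registers around the write-barrier call via CallRecordWriteStubSaveRegisters, each call site now materializes the slot address in the descriptor's fixed SlotAddressRegister and calls the stub directly, relying on the stub to preserve its callee-saved register set.

  // Before (arm): blanket save/restore inside the helper.
  CallRecordWriteStubSaveRegisters(object, offset, remembered_set_action,
                                   fp_mode);

  // After (arm): fixed descriptor registers, no blanket spill.
  Register slot_address = WriteBarrierDescriptor::SlotAddressRegister();
  DCHECK(!AreAliased(object, value, slot_address));
  add(slot_address, object, offset);
  CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);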

View File

@@ -349,6 +349,7 @@ void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
int offset,
Register value) {
DCHECK(!AreAliased(target, value));
__ str(value, FieldMemOperand(target, offset));
__ RecordWriteField(target, offset, value, kLRHasNotBeenSaved,
SaveFPRegsMode::kIgnore);

View File

@@ -713,8 +713,6 @@ void BaselineCompiler::VisitLdaImmutableCurrentContextSlot() {
}
void BaselineCompiler::VisitStaContextSlot() {
// TODO(cbruni): enable on all platforms
#if V8_TARGET_ARCH_X64
Register value = WriteBarrierDescriptor::ValueRegister();
Register context = WriteBarrierDescriptor::ObjectRegister();
DCHECK(!AreAliased(value, context, kInterpreterAccumulatorRegister));
@@ -724,37 +722,17 @@ void BaselineCompiler::VisitStaContextSlot() {
for (; depth > 0; --depth) {
__ LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
#else
BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
Register context = scratch_scope.AcquireScratch();
LoadRegister(context, 0);
int depth = Uint(2);
for (; depth > 0; --depth) {
__ LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
Register value = scratch_scope.AcquireScratch();
__ Move(value, kInterpreterAccumulatorRegister);
#endif // V8_TARGET_ARCH_X64
__ StoreTaggedFieldWithWriteBarrier(
context, Context::OffsetOfElementAt(iterator().GetIndexOperand(1)),
value);
}
void BaselineCompiler::VisitStaCurrentContextSlot() {
// TODO(cbruni): enable on all platforms
#if V8_TARGET_ARCH_X64
Register value = WriteBarrierDescriptor::ValueRegister();
Register context = WriteBarrierDescriptor::ObjectRegister();
DCHECK(!AreAliased(value, context, kInterpreterAccumulatorRegister));
__ Move(value, kInterpreterAccumulatorRegister);
__ LoadContext(context);
#else
BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
Register context = scratch_scope.AcquireScratch();
__ LoadContext(context);
Register value = scratch_scope.AcquireScratch();
__ Move(value, kInterpreterAccumulatorRegister);
#endif // V8_TARGET_ARCH_X64
__ StoreTaggedFieldWithWriteBarrier(
context, Context::OffsetOfElementAt(Index(0)), value);
}
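
Reading only the surviving lines of the hunk above, VisitStaCurrentContextSlot collapses to the descriptor-register path on every architecture; a reconstruction from this diff (not the verbatim post-patch file) looks like:

void BaselineCompiler::VisitStaCurrentContextSlot() {
  Register value = WriteBarrierDescriptor::ValueRegister();
  Register context = WriteBarrierDescriptor::ObjectRegister();
  DCHECK(!AreAliased(value, context, kInterpreterAccumulatorRegister));
  __ Move(value, kInterpreterAccumulatorRegister);
  __ LoadContext(context);
  __ StoreTaggedFieldWithWriteBarrier(
      context, Context::OffsetOfElementAt(Index(0)), value);
}
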
@@ -879,8 +857,6 @@ void BaselineCompiler::VisitLdaModuleVariable() {
}
void BaselineCompiler::VisitStaModuleVariable() {
// TODO(cbruni): enable on all platforms
#if V8_TARGET_ARCH_X64
int cell_index = Int(0);
if (V8_UNLIKELY(cell_index < 0)) {
// Not supported (probably never).
@@ -906,33 +882,6 @@ void BaselineCompiler::VisitStaModuleVariable() {
cell_index -= 1;
__ LoadFixedArrayElement(scratch, scratch, cell_index);
__ StoreTaggedFieldWithWriteBarrier(scratch, Cell::kValueOffset, value);
#else // V8_TARGET_ARCH_X64
BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
Register scratch = scratch_scope.AcquireScratch();
__ LoadContext(scratch);
int depth = Uint(1);
for (; depth > 0; --depth) {
__ LoadTaggedPointerField(scratch, scratch, Context::kPreviousOffset);
}
__ LoadTaggedPointerField(scratch, scratch, Context::kExtensionOffset);
int cell_index = Int(0);
if (cell_index > 0) {
__ LoadTaggedPointerField(scratch, scratch,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
__ LoadFixedArrayElement(scratch, scratch, cell_index);
SaveAccumulatorScope save_accumulator(&basm_);
__ StoreTaggedFieldWithWriteBarrier(scratch, Cell::kValueOffset,
kInterpreterAccumulatorRegister);
} else {
// Not supported (probably never).
CallRuntime(Runtime::kAbort,
Smi::FromInt(static_cast<int>(
AbortReason::kUnsupportedModuleOperation)));
__ Trap();
}
#endif // V8_TARGET_ARCH_X64
}
void BaselineCompiler::VisitStaNamedProperty() {

View File

@@ -333,12 +333,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- r1 : the JSGeneratorObject to resume
// -- lr : return address
// -----------------------------------
__ AssertGeneratorObject(r1);
// Store input value into generator object.
__ str(r0, FieldMemOperand(r1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(r1, JSGeneratorObject::kInputOrDebugPosOffset, r0,
kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore);
// Check that r1 is still valid, RecordWrite might have clobbered it.
__ AssertGeneratorObject(r1);
// Load suspended function and context.
__ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
@@ -793,6 +793,7 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
Register optimized_code,
Register closure) {
DCHECK(!AreAliased(optimized_code, closure));
// Store code entry in the closure.
__ str(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
__ RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
@@ -999,6 +1000,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
static void LoadOptimizationStateAndJumpIfNeedsProcessing(
MacroAssembler* masm, Register optimization_state, Register feedback_vector,
Label* has_optimized_code_or_marker) {
DCHECK(!AreAliased(optimization_state, feedback_vector));
__ RecordComment("[ Check optimization state");
__ ldr(optimization_state,
@@ -1014,6 +1016,7 @@ static void LoadOptimizationStateAndJumpIfNeedsProcessing(
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
MacroAssembler* masm, Register optimization_state,
Register feedback_vector) {
DCHECK(!AreAliased(optimization_state, feedback_vector));
Label maybe_has_optimized_code;
// Check if optimized code is available
__ tst(

View File

@@ -437,13 +437,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- x1 : the JSGeneratorObject to resume
// -- lr : return address
// -----------------------------------
__ AssertGeneratorObject(x1);
// Store input value into generator object.
__ StoreTaggedField(
x0, FieldMemOperand(x1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(x1, JSGeneratorObject::kInputOrDebugPosOffset, x0,
kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore);
// Check that x1 is still valid, RecordWrite might have clobbered it.
__ AssertGeneratorObject(x1);
// Load suspended function and context.
__ LoadTaggedPointerField(
@@ -963,6 +964,7 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
Register optimized_code,
Register closure) {
DCHECK(!AreAliased(optimized_code, closure));
// Store code entry in the closure.
__ StoreTaggedField(optimized_code,
FieldMemOperand(closure, JSFunction::kCodeOffset));
@@ -1179,6 +1181,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
static void LoadOptimizationStateAndJumpIfNeedsProcessing(
MacroAssembler* masm, Register optimization_state, Register feedback_vector,
Label* has_optimized_code_or_marker) {
DCHECK(!AreAliased(optimization_state, feedback_vector));
__ RecordComment("[ Check optimization state");
__ Ldr(optimization_state,
@@ -1194,6 +1197,7 @@ static void LoadOptimizationStateAndJumpIfNeedsProcessing(
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
MacroAssembler* masm, Register optimization_state,
Register feedback_vector) {
DCHECK(!AreAliased(optimization_state, feedback_vector));
Label maybe_has_optimized_code;
// Check if optimized code is available
__ TestAndBranchIfAllClear(

View File

@@ -592,12 +592,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- edx : the JSGeneratorObject to resume
// -- esp[0] : return address
// -----------------------------------
__ AssertGeneratorObject(edx);
// Store input value into generator object.
__ mov(FieldOperand(edx, JSGeneratorObject::kInputOrDebugPosOffset), eax);
__ RecordWriteField(edx, JSGeneratorObject::kInputOrDebugPosOffset, eax, ecx,
Register object = WriteBarrierDescriptor::ObjectRegister();
__ mov(object, edx);
__ RecordWriteField(object, JSGeneratorObject::kInputOrDebugPosOffset, eax,
WriteBarrierDescriptor::SlotAddressRegister(),
SaveFPRegsMode::kIgnore);
// Check that edx is still valid, RecordWrite might have clobbered it.
__ AssertGeneratorObject(edx);
// Load suspended function and context.
__ mov(edi, FieldOperand(edx, JSGeneratorObject::kFunctionOffset));
@@ -731,12 +734,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
Register optimized_code,
Register closure,
Register scratch1,
Register scratch2) {
Register value,
Register slot_address) {
// Store the optimized code in the closure.
__ mov(FieldOperand(closure, JSFunction::kCodeOffset), optimized_code);
__ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
__ mov(value, optimized_code); // Write barrier clobbers slot_address below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, value, slot_address,
SaveFPRegsMode::kIgnore, RememberedSetAction::kOmit,
SmiCheck::kOmit);
}
@@ -816,9 +819,11 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Optimized code is good, get it into the closure and link the closure
// into the optimized functions list, then tail call the optimized code.
__ Push(optimized_code_entry);
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, edx,
eax);
ecx);
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ Pop(optimized_code_entry);
__ LoadCodeObjectEntry(ecx, optimized_code_entry);
__ Pop(edx);
__ Pop(eax);
@@ -1255,9 +1260,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(ecx, BaselineData::kBaselineCodeOffset));
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ push(edx); // Spill.
__ push(ecx);
__ Push(xmm0, eax); // Save the argument count (currently in xmm0).
ReplaceClosureCodeWithOptimizedCode(masm, ecx, closure, eax, edx);
ReplaceClosureCodeWithOptimizedCode(masm, ecx, closure, eax, ecx);
__ pop(eax); // Restore the argument count.
__ pop(ecx);
__ pop(edx);
__ JumpCodeObject(ecx);

View File

@@ -681,7 +681,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- rdx : the JSGeneratorObject to resume
// -- rsp[0] : return address
// -----------------------------------
__ AssertGeneratorObject(rdx);
// Store input value into generator object.
__ StoreTaggedField(
@@ -691,6 +690,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ RecordWriteField(object, JSGeneratorObject::kInputOrDebugPosOffset, rax,
WriteBarrierDescriptor::SlotAddressRegister(),
SaveFPRegsMode::kIgnore);
// Check that rdx is still valid, RecordWrite might have clobbered it.
__ AssertGeneratorObject(rdx);
Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r8 : no_reg;

View File

@@ -38,8 +38,7 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>::
// static
constexpr auto WriteBarrierDescriptor::registers() {
STATIC_ASSERT(kReturnRegister0 == r0);
return RegisterArray(r0, r1, r2, r3, r4);
return RegisterArray(r1, r5, r4, r2, r0);
}
// static
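
Assuming the descriptor convention at this revision (registers()[0] is ObjectRegister, registers()[1] is SlotAddressRegister, registers()[2] is ValueRegister, and the rest are temps — an assumption inferred from how the accessors are used elsewhere in this CL), the new arm array decodes as object = r1 (which Verify() below pins to kJSFunctionRegister), slot = r5, value = r4, with r2 and r0 as temps; keeping r0 in the set is what the STATIC_ASSERT on kReturnRegister0 checks. A standalone toy to make the mapping explicit:

#include <cstdio>

// Hypothetical model, not V8 code: decode the arm RegisterArray above under
// the assumed parameter order Object, SlotAddress, Value, temps.
enum Reg { r0, r1, r2, r3, r4, r5 };
constexpr Reg kWriteBarrierRegs[] = {r1, r5, r4, r2, r0};
constexpr Reg ObjectRegister() { return kWriteBarrierRegs[0]; }
constexpr Reg SlotAddressRegister() { return kWriteBarrierRegs[1]; }
constexpr Reg ValueRegister() { return kWriteBarrierRegs[2]; }

int main() {
  std::printf("object=r%d slot=r%d value=r%d\n", ObjectRegister(),
              SlotAddressRegister(), ValueRegister());
}

The arm64 array below (x1, x5, x4, x2, x0, x3) reads the same way, with one extra temp.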

View File

@@ -672,6 +672,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label ok;
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(!AreAliased(object, value, scratch));
add(scratch, object, Operand(offset - kHeapObjectTag));
tst(scratch, Operand(kPointerSize - 1));
b(eq, &ok);
@@ -810,13 +811,12 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
SmiCheck smi_check) {
DCHECK(!AreAliased(object, value));
if (FLAG_debug_code) {
{
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
add(scratch, object, offset);
ldr(scratch, MemOperand(scratch));
cmp(scratch, value);
}
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(!AreAliased(object, value, scratch));
add(scratch, object, offset);
ldr(scratch, MemOperand(scratch));
cmp(scratch, value);
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
@@ -843,12 +843,18 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
if (lr_status == kLRHasNotBeenSaved) {
push(lr);
}
CallRecordWriteStubSaveRegisters(object, offset, remembered_set_action,
fp_mode);
Register slot_address = WriteBarrierDescriptor::SlotAddressRegister();
DCHECK(!AreAliased(object, value, slot_address));
DCHECK(!offset.IsRegister());
add(slot_address, object, offset);
CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);
if (lr_status == kLRHasNotBeenSaved) {
pop(lr);
}
if (FLAG_debug_code) Move(slot_address, Operand(kZapValue));
bind(&done);
}
@@ -2573,6 +2579,7 @@ void TurboAssembler::CheckPageFlag(Register object, int mask, Condition cc,
Label* condition_met) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(!AreAliased(object, scratch));
DCHECK(cc == eq || cc == ne);
Bfc(scratch, object, 0, kPageSizeBits);
ldr(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));

View File

@@ -38,8 +38,7 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>::
// static
constexpr auto WriteBarrierDescriptor::registers() {
STATIC_ASSERT(kReturnRegister0 == x0);
return RegisterArray(x0, x1, x2, x3, x4);
return RegisterArray(x1, x5, x4, x2, x0, x3);
}
// static

View File

@@ -2921,6 +2921,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label ok;
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
DCHECK(!AreAliased(object, value, scratch));
Add(scratch, object, offset - kHeapObjectTag);
Tst(scratch, kTaggedSize - 1);
B(eq, &ok);
@@ -3059,7 +3060,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
if (FLAG_debug_code) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
DCHECK(!AreAliased(object, value, temp));
Add(temp, object, offset);
LoadTaggedPointerField(temp, MemOperand(temp));
Cmp(temp, value);
@@ -3090,11 +3091,16 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
if (lr_status == kLRHasNotBeenSaved) {
Push<TurboAssembler::kSignLR>(padreg, lr);
}
CallRecordWriteStubSaveRegisters(object, offset, remembered_set_action,
fp_mode);
Register slot_address = WriteBarrierDescriptor::SlotAddressRegister();
DCHECK(!AreAliased(object, slot_address, value));
// TODO(cbruni): Turn offset into int.
DCHECK(offset.IsImmediate());
Add(slot_address, object, offset);
CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);
if (lr_status == kLRHasNotBeenSaved) {
Pop<TurboAssembler::kAuthLR>(lr, padreg);
}
if (FLAG_debug_code) Mov(slot_address, Operand(kZapValue));
Bind(&done);
}

View File

@@ -32,8 +32,7 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>::
// static
constexpr auto WriteBarrierDescriptor::registers() {
STATIC_ASSERT(esi == kContextRegister);
return RegisterArray(ecx, edx, esi, edi, kReturnRegister0);
return RegisterArray(edi, ecx, edx, esi, kReturnRegister0);
}
// static

View File

@@ -363,7 +363,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
}
void MacroAssembler::RecordWriteField(Register object, int offset,
Register value, Register dst,
Register value, Register slot_address,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
@@ -380,16 +380,16 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// of the object, so the offset must be a multiple of kTaggedSize.
DCHECK(IsAligned(offset, kTaggedSize));
lea(dst, FieldOperand(object, offset));
lea(slot_address, FieldOperand(object, offset));
if (FLAG_debug_code) {
Label ok;
test_b(dst, Immediate(kTaggedSize - 1));
test_b(slot_address, Immediate(kTaggedSize - 1));
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
}
RecordWrite(object, dst, value, save_fp, remembered_set_action,
RecordWrite(object, slot_address, value, save_fp, remembered_set_action,
SmiCheck::kOmit);
bind(&done);
@@ -398,7 +398,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// turned on to provoke errors.
if (FLAG_debug_code) {
mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
mov(dst, Immediate(bit_cast<int32_t>(kZapValue)));
mov(slot_address, Immediate(bit_cast<int32_t>(kZapValue)));
}
}
@@ -498,11 +498,11 @@ void TurboAssembler::CallRecordWriteStub(
}
}
void MacroAssembler::RecordWrite(Register object, Register address,
void MacroAssembler::RecordWrite(Register object, Register slot_address,
Register value, SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
DCHECK(!AreAliased(object, value, address));
DCHECK(!AreAliased(object, value, slot_address));
AssertNotSmi(object);
if ((remembered_set_action == RememberedSetAction::kOmit &&
@@ -513,7 +513,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
if (FLAG_debug_code) {
Label ok;
cmp(value, Operand(address, 0));
cmp(value, Operand(slot_address, 0));
j(equal, &ok, Label::kNear);
int3();
bind(&ok);
@@ -536,16 +536,16 @@ void MacroAssembler::RecordWrite(Register object, Register address,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
Label::kNear);
RecordComment("CheckPageFlag]");
CallRecordWriteStubSaveRegisters(object, address, remembered_set_action,
fp_mode);
CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);
bind(&done);
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (FLAG_debug_code) {
mov(address, Immediate(bit_cast<int32_t>(kZapValue)));
mov(slot_address, Immediate(bit_cast<int32_t>(kZapValue)));
mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
}
}

View File

@@ -132,8 +132,6 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>::Verify(
// static
void WriteBarrierDescriptor::Verify(CallInterfaceDescriptorData* data) {
DCHECK(!AreAliased(ObjectRegister(), SlotAddressRegister(), ValueRegister()));
// TODO(cbruni): enable on all platforms.
#if V8_TARGET_ARCH_X64
// The default parameters should not clobber vital registers in order to
// reduce code size:
DCHECK(!AreAliased(ObjectRegister(), kContextRegister,
@@ -142,9 +140,9 @@ void WriteBarrierDescriptor::Verify(CallInterfaceDescriptorData* data) {
kInterpreterAccumulatorRegister));
DCHECK(!AreAliased(ValueRegister(), kContextRegister,
kInterpreterAccumulatorRegister));
DCHECK(!AreAliased(SlotAddressRegister(), kJavaScriptCallNewTargetRegister));
// Coincidental: to make calling from various builtins easier.
DCHECK_EQ(ObjectRegister(), kJSFunctionRegister);
#endif
// We need a certain set of registers by default:
RegList allocatable_regs = data->allocatable_registers();
DCHECK(allocatable_regs | kContextRegister.bit());

View File

@@ -1023,10 +1023,7 @@ class WriteBarrierDescriptor final
DECLARE_DESCRIPTOR(WriteBarrierDescriptor)
static constexpr auto registers();
static constexpr bool kRestrictAllocatableRegisters = true;
#if V8_TARGET_ARCH_X64
// TODO(cbruni): Extend to all platforms.
static constexpr bool kCalleeSaveRegisters = true;
#endif
static constexpr inline Register ObjectRegister();
static constexpr inline Register SlotAddressRegister();
// A temporary register used in helpers.

View File

@@ -21,7 +21,6 @@ constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
// static
constexpr auto WriteBarrierDescriptor::registers() {
STATIC_ASSERT(kReturnRegister0 == v0);
return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
}

View File

@@ -21,7 +21,6 @@ constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
// static
constexpr auto WriteBarrierDescriptor::registers() {
STATIC_ASSERT(kReturnRegister0 == v0);
return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
}

View File

@@ -38,7 +38,6 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>::
// static
constexpr auto WriteBarrierDescriptor::registers() {
STATIC_ASSERT(kReturnRegister0 == r3);
return RegisterArray(r3, r4, r5, r6, r7);
}

View File

@@ -39,7 +39,6 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>::
// static
constexpr auto WriteBarrierDescriptor::registers() {
STATIC_ASSERT(kReturnRegister0 == a0);
return RegisterArray(a0, a1, a2, a3);
}

View File

@@ -38,7 +38,6 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>::
// static
constexpr auto WriteBarrierDescriptor::registers() {
STATIC_ASSERT(kReturnRegister0 == r2);
return RegisterArray(r2, r3, r4, r5, r6);
}

View File

@@ -3251,8 +3251,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ PushCPURegList(saves_fp);
// Save registers.
DCHECK_IMPLIES(!saves.IsEmpty(),
saves.list() == CPURegList::GetCalleeSaved().list());
__ PushCPURegList<TurboAssembler::kSignLR>(saves);
if (returns != 0) {

View File

@@ -4673,12 +4673,19 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
__ PushReturnAddressFrom(scratch_reg);
__ Ret();
} else if (additional_pop_count->IsImmediate()) {
Register scratch_reg = ecx;
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & scratch_reg.bit());
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
size_t pop_size = (parameter_slots + additional_count) * kSystemPointerSize;
CHECK_LE(pop_size, static_cast<size_t>(std::numeric_limits<int>::max()));
__ Ret(static_cast<int>(pop_size), scratch_reg);
if (is_uint16(pop_size)) {
// Avoid the additional scratch register, it might clobber the
// CalleeSavedRegisters.
__ ret(static_cast<int>(pop_size));
} else {
Register scratch_reg = ecx;
DCHECK_EQ(0u,
call_descriptor->CalleeSavedRegisters() & scratch_reg.bit());
CHECK_LE(pop_size, static_cast<size_t>(std::numeric_limits<int>::max()));
__ Ret(static_cast<int>(pop_size), scratch_reg);
}
} else {
Register pop_reg = g.ToRegister(additional_pop_count);
Register scratch_reg = pop_reg == ecx ? edx : ecx;
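
The is_uint16 guard above matches the encoding limit of the x86 ret imm16 instruction, whose stack-adjustment immediate is 16 bits wide, so small pops can skip the scratch register entirely; only oversized pops fall back to Ret(int, Register), which (per the comment in the hunk) needs a scratch register that might alias a callee-saved one. A standalone sanity check of that boundary, with hypothetical slot counts:

#include <cstddef>
#include <cstdio>

// Illustration only: ret imm16 can release at most 0xFFFF bytes of stack.
constexpr bool is_uint16(std::size_t value) { return value <= 0xFFFF; }

int main() {
  constexpr std::size_t kSystemPointerSize = 4;  // ia32
  for (std::size_t slots : {4u, 20000u}) {
    std::size_t pop_size = slots * kSystemPointerSize;
    std::printf("%zu slots -> %zu bytes, ret imm16 ok: %d\n", slots, pop_size,
                is_uint16(pop_size));
  }
}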

View File

@@ -493,13 +493,8 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
RegList allocatable_registers = descriptor.allocatable_registers();
RegList callee_saved_registers = kNoCalleeSaved;
if (descriptor.CalleeSaveRegisters()) {
#if V8_TARGET_ARCH_X64
// TODO(cbruni): Extend to all architectures.
callee_saved_registers = allocatable_registers;
DCHECK(callee_saved_registers);
#else
UNREACHABLE();
#endif
}
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
return zone->New<CallDescriptor>( // --
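
What the now-unconditional assignment buys (a reading of the hunk, not a new claim): the stub's allocatable registers are reported to the register allocator as callee-saved, so values that are live across a write-barrier call no longer have to be spilled around it, on any architecture. In RegList terms that is a plain bitmask copy; a toy model of the idea:

#include <cstdint>
#include <cstdio>

// Toy RegList model (V8's RegList at this revision is likewise a bitmask).
using RegList = std::uint32_t;
constexpr RegList bit(int reg) { return RegList{1} << reg; }

int main() {
  // The arm WriteBarrierDescriptor registers from this CL: r1, r5, r4, r2, r0.
  RegList allocatable = bit(1) | bit(5) | bit(4) | bit(2) | bit(0);
  RegList callee_saved = allocatable;  // the hunk's assignment, on every arch
  std::printf("callee-saved mask: 0x%02x\n",
              static_cast<unsigned>(callee_saved));
}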