Reland "[ia32] Remove poisoning logic on ia32"

This is a reland of a31a623047

Original change's description:
> [ia32] Remove poisoning logic on ia32
>
> Poisoning was disabled by default on ia32 a while ago. This CL
> removes its logic from ia32 code generation, which will let us move
> towards fuller (and unconditional) root register support.
>
> Bug: chromium:860429, v8:8254
> Change-Id: I8f672cf48a6ffc7bf21e7794c1b7463d7f8b9594
> Reviewed-on: https://chromium-review.googlesource.com/c/1296131
> Commit-Queue: Jakob Gruber <jgruber@chromium.org>
> Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
> Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#56978}

Tbr: mstarzinger@chromium.org,jarin@chromium.org
Bug: chromium:860429, v8:8254
Change-Id: Ia65ac57fdc6b9a0f59cc64455d6a000005e9be3b
Reviewed-on: https://chromium-review.googlesource.com/c/1299080
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56984}
Jakob Gruber authored 2018-10-25 14:01:12 +02:00, committed by Commit Bot
parent e725ebb1c2
commit ff6138ad08
12 changed files with 47 additions and 165 deletions
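
For readers unfamiliar with the mitigation being deleted: branch load poisoning keeps a mask register (ebx on ia32) that is all-ones on the architecturally taken path and all-zeros under misspeculation, and ANDs it into loaded values so speculative loads cannot leak data. A minimal standalone sketch of the idea, with illustrative names only (this is not V8 source):

```cpp
#include <cstdint>

// All-ones on the correct path; code on a mispredicted path sees zero.
uint32_t speculation_poison = 0xFFFFFFFFu;

uint32_t PoisonedLoad(const uint32_t* p) {
  uint32_t value = *p;          // a load that speculation could leak through
  value &= speculation_poison;  // masked to zero when mispredicted
  return value;
}
```

Removing this machinery on ia32 frees ebx to serve as the root register unconditionally, which is what the rest of the diff does.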


@@ -1334,13 +1334,8 @@ namespace {
 void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
                                       bool java_script_builtin,
                                       bool with_result) {
-#ifdef V8_EMBEDDED_BUILTINS
   // TODO(v8:6666): Fold into Default config once root is fully supported.
   const RegisterConfiguration* config(
       RegisterConfiguration::PreserveRootIA32());
-#else
-  const RegisterConfiguration* config(RegisterConfiguration::Default());
-#endif
   int allocatable_register_count = config->num_allocatable_general_registers();
   if (with_result) {
     // Overwrite the hole inserted by the deoptimizer with the return value from
@@ -1370,32 +1365,20 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
 }  // namespace

 void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
-#ifdef V8_EMBEDDED_BUILTINS
-  // TODO(v8:6666): Remove the ifdef once root is preserved by default.
-#endif
   Generate_ContinueToBuiltinHelper(masm, false, false);
 }

 void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
     MacroAssembler* masm) {
-#ifdef V8_EMBEDDED_BUILTINS
-  // TODO(v8:6666): Remove the ifdef once root is preserved by default.
-#endif
   Generate_ContinueToBuiltinHelper(masm, false, true);
 }

 void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
-#ifdef V8_EMBEDDED_BUILTINS
-  // TODO(v8:6666): Remove the ifdef once root is preserved by default.
-#endif
   Generate_ContinueToBuiltinHelper(masm, true, false);
 }

 void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
     MacroAssembler* masm) {
-#ifdef V8_EMBEDDED_BUILTINS
-  // TODO(v8:6666): Remove the ifdef once root is preserved by default.
-#endif
   Generate_ContinueToBuiltinHelper(masm, true, true);
 }
@@ -2507,10 +2490,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
   // If argv_mode == kArgvInRegister:
   // ecx: pointer to the first argument
-#ifdef V8_EMBEDDED_BUILTINS
-  // TODO(v8:6666): Remove the ifdef once branch load poisoning is removed.
-#endif
   STATIC_ASSERT(eax == kRuntimeCallArgCountRegister);
   STATIC_ASSERT(ecx == kRuntimeCallArgvRegister);
   STATIC_ASSERT(edx == kRuntimeCallFunctionRegister);
@@ -2632,17 +2611,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
   __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
   __ bind(&skip);

-#ifdef V8_EMBEDDED_BUILTINS
-  STATIC_ASSERT(kRootRegister == kSpeculationPoisonRegister);
-  CHECK(!FLAG_untrusted_code_mitigations);
-#else
-  // Reset the masking register. This is done independent of the underlying
-  // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
-  // with both configurations. It is safe to always do this, because the
-  // underlying register is caller-saved and can be arbitrarily clobbered.
-  __ ResetSpeculationPoisonRegister();
-#endif
   // Compute the handler entry address and jump to it.
   __ mov(edi, __ ExternalReferenceAsOperand(pending_handler_entrypoint_address,
                                             edi));


@@ -309,30 +309,6 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
   Zone* zone_;
 };

-void MoveOperandIfAliasedWithPoisonRegister(Instruction* call_instruction,
-                                            CodeGenerator* gen) {
-  IA32OperandConverter i(gen, call_instruction);
-  int const poison_index = i.InputInt32(1);
-  if (poison_index == -1) {
-    // No aliasing -> nothing to move.
-    return;
-  }
-  i.MoveInstructionOperandToRegister(kSpeculationPoisonRegister,
-                                     call_instruction->InputAt(poison_index));
-}
-
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
-                                   InstructionCode opcode, Instruction* instr,
-                                   IA32OperandConverter& i) {
-  const MemoryAccessMode access_mode =
-      static_cast<MemoryAccessMode>(MiscField::decode(opcode));
-  if (access_mode == kMemoryAccessPoisoned) {
-    Register value = i.OutputRegister();
-    codegen->tasm()->and_(value, kSpeculationPoisonRegister);
-  }
-}
-
 }  // namespace

 #define ASSEMBLE_COMPARE(asm_instr) \
@@ -629,23 +605,13 @@ void CodeGenerator::BailoutIfDeoptimized() {
 }

 void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
-  __ push(eax);  // Push eax so we can use it as a scratch register.
-  // Set a mask which has all bits set in the normal case, but has all
-  // bits cleared if we are speculatively executing the wrong PC.
-  __ ComputeCodeStartAddress(eax);
-  __ mov(kSpeculationPoisonRegister, Immediate(0));
-  __ cmp(kJavaScriptCallCodeStartRegister, eax);
-  __ mov(eax, Immediate(-1));
-  __ cmov(equal, kSpeculationPoisonRegister, eax);
-  __ pop(eax);  // Restore eax.
+  // TODO(860429): Remove remaining poisoning infrastructure on ia32.
+  UNREACHABLE();
 }

 void CodeGenerator::AssembleRegisterArgumentPoisoning() {
-  __ and_(kJSFunctionRegister, kSpeculationPoisonRegister);
-  __ and_(kContextRegister, kSpeculationPoisonRegister);
-  __ and_(esp, kSpeculationPoisonRegister);
+  // TODO(860429): Remove remaining poisoning infrastructure on ia32.
+  UNREACHABLE();
 }

 // Assembles an instruction after register allocation, producing machine code.
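
The deleted GenerateSpeculationPoisonFromCodeStartRegister boiled down to a comparison-driven mask. A C-level reconstruction of the deleted push/cmp/cmov sequence (illustrative, not V8 source):

```cpp
#include <cstdint>

// Mask is all-ones only when the code-start register matches the code start
// recomputed from the current PC, i.e. when we did not speculate into this
// code object from a mispredicted (indirect) call target.
uint32_t ComputePoison(uintptr_t code_start_register,
                       uintptr_t computed_code_start) {
  return code_start_register == computed_code_start ? 0xFFFFFFFFu : 0u;
}
```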
@@ -656,7 +622,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
   ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
   switch (arch_opcode) {
     case kArchCallCodeObject: {
-      MoveOperandIfAliasedWithPoisonRegister(instr, this);
       InstructionOperand* op = instr->InputAt(0);
       if (op->IsImmediate()) {
         Handle<Code> code = i.InputCode(0);
@@ -695,7 +660,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArchCallWasmFunction: {
-      MoveOperandIfAliasedWithPoisonRegister(instr, this);
       if (HasImmediateInput(instr, 0)) {
         Constant constant = i.ToConstant(instr->InputAt(0));
         Address wasm_code = static_cast<Address>(constant.ToInt32());
@@ -722,7 +686,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
-      MoveOperandIfAliasedWithPoisonRegister(instr, this);
       if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          no_reg, no_reg, no_reg);
@@ -747,7 +710,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArchTailCallWasm: {
-      MoveOperandIfAliasedWithPoisonRegister(instr, this);
       if (HasImmediateInput(instr, 0)) {
         Constant constant = i.ToConstant(instr->InputAt(0));
         Address wasm_code = static_cast<Address>(constant.ToInt32());
@@ -765,7 +727,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArchTailCallAddress: {
-      MoveOperandIfAliasedWithPoisonRegister(instr, this);
       CHECK(!HasImmediateInput(instr, 0));
       Register reg = i.InputRegister(0);
       DCHECK_IMPLIES(
@@ -781,7 +742,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArchCallJSFunction: {
-      MoveOperandIfAliasedWithPoisonRegister(instr, this);
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
         // Check the function's context matches the context argument.
@@ -832,7 +792,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       AssemblePrepareTailCall();
       break;
     case kArchCallCFunction: {
-      MoveOperandIfAliasedWithPoisonRegister(instr, this);
       int const num_parameters = MiscField::decode(instr->opcode());
       if (HasImmediateInput(instr, 0)) {
         ExternalReference ref = i.InputExternalReference(0);
@@ -1217,8 +1176,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ bswap(i.OutputRegister());
       break;
     case kArchWordPoisonOnSpeculation:
-      DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
-      __ and_(i.InputRegister(0), kSpeculationPoisonRegister);
+      // TODO(860429): Remove remaining poisoning infrastructure on ia32.
+      UNREACHABLE();
       break;
     case kLFence:
       __ lfence();
@@ -1593,11 +1552,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     case kIA32Movsxbl:
       ASSEMBLE_MOVX(movsx_b);
-      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
       break;
     case kIA32Movzxbl:
       ASSEMBLE_MOVX(movzx_b);
-      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
       break;
     case kIA32Movb: {
       size_t index = 0;
@@ -1607,16 +1564,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         __ mov_b(operand, i.InputRegister(index));
       }
-      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
       break;
     }
     case kIA32Movsxwl:
       ASSEMBLE_MOVX(movsx_w);
-      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
       break;
     case kIA32Movzxwl:
       ASSEMBLE_MOVX(movzx_w);
-      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
       break;
     case kIA32Movw: {
       size_t index = 0;
@@ -1626,13 +1580,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         __ mov_w(operand, i.InputRegister(index));
       }
-      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
       break;
     }
     case kIA32Movl:
       if (instr->HasOutput()) {
         __ mov(i.OutputRegister(), i.MemoryOperand());
-        EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
       } else {
         size_t index = 0;
         Operand operand = i.MemoryOperand(&index);
@@ -3919,15 +3871,8 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {

 void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
                                             Instruction* instr) {
-  // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
-  if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
-    return;
-  }
-  condition = NegateFlagsCondition(condition);
-  __ setcc(FlagsConditionToCondition(condition), kSpeculationPoisonRegister);
-  __ add(kSpeculationPoisonRegister, Immediate(255));
-  __ sar(kSpeculationPoisonRegister, 31u);
+  // TODO(860429): Remove remaining poisoning infrastructure on ia32.
+  UNREACHABLE();
 }

 void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
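
The deleted setcc/add/sar sequence is worth a second look: it only works because the poison register already holds 0xFFFFFFFF on the correct path and setcc writes just the low byte. A simulation of both cases (my reconstruction, not V8 source):

```cpp
#include <cstdint>

uint32_t BranchPoisonUpdate(uint32_t poison, bool wrong_path) {
  // setcc: writes only the low byte, leaving the upper 24 bits intact.
  poison = (poison & 0xFFFFFF00u) | (wrong_path ? 1u : 0u);
  poison += 255u;  // add(poison, Immediate(255)): wraps to 0 on the wrong path
  // sar(poison, 31): arithmetic shift smears bit 31 across the register.
  return static_cast<uint32_t>(static_cast<int32_t>(poison) >> 31);
}

// BranchPoisonUpdate(0xFFFFFFFFu, false) == 0xFFFFFFFFu  (keep the mask)
// BranchPoisonUpdate(0xFFFFFFFFu, true)  == 0x00000000u  (poison everything)
```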
@@ -4255,7 +4200,6 @@ void CodeGenerator::AssembleConstructFrame() {
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
     shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
-    ResetSpeculationPoison();
   }

   const RegList saves = call_descriptor->CalleeSavedRegisters();


@@ -1353,7 +1353,6 @@ void VisitPairAtomicBinOp(InstructionSelector* selector, Node* node,
   // For Word64 operations, the value input is split into a high node and
   // a low node in the int64-lowering phase.
   Node* value_high = node->InputAt(3);
-  bool block_root_register = !FLAG_embedded_builtins;

   // Wasm lives in 32-bit address space, so we do not need to worry about
   // base/index lowering. This will need to be fixed for Wasm64.
@@ -1366,22 +1365,19 @@ void VisitPairAtomicBinOp(InstructionSelector* selector, Node* node,
   Node* projection0 = NodeProperties::FindProjection(node, 0);
   Node* projection1 = NodeProperties::FindProjection(node, 1);
   if (projection1) {
-    InstructionOperand temps[] = {g.TempRegister(ebx)};
     InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax),
                                     g.DefineAsFixed(projection1, edx)};
-    const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
     selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
-                   num_temps, temps);
+                   0, {});
   } else if (projection0) {
     InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax)};
-    InstructionOperand temps[] = {g.TempRegister(edx), g.TempRegister(ebx)};
-    const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
+    InstructionOperand temps[] = {g.TempRegister(edx)};
+    const int num_temps = arraysize(temps);
     selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
                    num_temps, temps);
   } else {
-    InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx),
-                                  g.TempRegister(ebx)};
-    const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
+    InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
+    const int num_temps = arraysize(temps);
     selector->Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps,
                    temps);
   }
@@ -1803,7 +1799,6 @@ void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
   Node* index = node->InputAt(1);
   Node* value = node->InputAt(2);
   Node* value_high = node->InputAt(3);
-  bool block_root_register = !FLAG_embedded_builtins;

   AddressingMode addressing_mode;
   InstructionOperand inputs[] = {
@@ -1813,9 +1808,8 @@ void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
   // Allocating temp registers here as stores are performed using an atomic
   // exchange, the output of which is stored in edx:eax, which should be saved
   // and restored at the end of the instruction.
-  InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx),
-                                g.TempRegister(ebx)};
-  const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
+  InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
+  const int num_temps = arraysize(temps);
   InstructionCode code =
       kIA32Word32AtomicPairStore | AddressingModeField::encode(addressing_mode);
   Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
@@ -1849,7 +1843,6 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
   IA32OperandGenerator g(this);
   Node* index = node->InputAt(1);
   AddressingMode addressing_mode;
-  bool block_root_register = !FLAG_embedded_builtins;

   InstructionOperand inputs[] = {
       // High, Low values of old value
@@ -1866,22 +1859,18 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
                          AddressingModeField::encode(addressing_mode);
   if (projection1) {
-    InstructionOperand temps[] = {g.TempRegister(ebx)};
     InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax),
                                     g.DefineAsFixed(projection1, edx)};
-    const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
-    Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
-         num_temps, temps);
+    Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs, 0, {});
   } else if (projection0) {
     InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax)};
-    InstructionOperand temps[] = {g.TempRegister(edx), g.TempRegister(ebx)};
-    const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
+    InstructionOperand temps[] = {g.TempRegister(edx)};
+    const int num_temps = arraysize(temps);
     Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
          num_temps, temps);
   } else {
-    InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx),
-                                  g.TempRegister(ebx)};
-    const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
+    InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
+    const int num_temps = arraysize(temps);
     Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
   }
 }
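
The pattern behind all three temps changes above: ebx was always listed last, and the old code sliced it off when the root register had to stay live. With embedded builtins now assumed on ia32, ebx is simply never requested as a temp. The old conditional, annotated (a reading of the removed code, not new code):

```cpp
// Old scheme: ebx deliberately placed last in the temps array.
InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx),
                              g.TempRegister(ebx)};
// block_root_register == !FLAG_embedded_builtins:
//   true  -> num_temps == 3, ebx handed to the allocator as before;
//   false -> num_temps == 2, the trailing ebx entry silently dropped so the
//            allocator can never clobber the root register.
const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
```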


@@ -1015,7 +1015,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
       if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison &&
           unallocated.HasFixedRegisterPolicy()) {
         int reg = unallocated.fixed_register_index();
-        if (reg == kSpeculationPoisonRegister.code()) {
+        if (Register::from_code(reg) == kSpeculationPoisonRegister) {
           buffer->instruction_args[poison_alias_index] = g.TempImmediate(
               static_cast<int32_t>(buffer->instruction_args.size()));
           op = g.UseRegisterOrSlotOrConstant(*iter);
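
The one-line change above is subtle: kSpeculationPoisonRegister is now no_reg on ia32, and calling .code() on an invalid register trips a DCHECK. Comparing Register values instead is safe in both directions, since reg comes from a fixed-register policy and is always valid. A sketch of the distinction (illustrative helper name):

```cpp
// Old: kSpeculationPoisonRegister.code() asserts when the register is no_reg.
// New: build a Register from the known-valid code and compare registers.
bool AliasesPoisonRegister(int reg) {
  return Register::from_code(reg) == kSpeculationPoisonRegister;  // false for no_reg
}
```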
@@ -2590,7 +2590,6 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
   if (flags & CallDescriptor::kAllowCallThroughSlot) {
     // TODO(v8:6666): Remove kAllowCallThroughSlot and use a pc-relative call
     // instead once builtins are embedded in every build configuration.
-    DCHECK(FLAG_embedded_builtins);
     call_buffer_flags |= kAllowCallThroughSlot;
 #ifndef V8_TARGET_ARCH_32_BIT
     // kAllowCallThroughSlot is only supported on ia32.

@@ -2448,28 +2448,24 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
     std::unique_ptr<const RegisterConfiguration> config;
     config.reset(RegisterConfiguration::RestrictGeneralRegisters(registers));
     AllocateRegisters(config.get(), call_descriptor, run_verifier);
-  } else if (data->info()->GetPoisoningMitigationLevel() !=
-             PoisoningMitigationLevel::kDontPoison) {
-#if defined(V8_TARGET_ARCH_IA32)
-    DCHECK(!FLAG_embedded_builtins);
-#endif
-    AllocateRegisters(RegisterConfiguration::Poisoning(), call_descriptor,
-                      run_verifier);
+#ifdef V8_TARGET_ARCH_IA32
   } else {
-#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
     // TODO(v8:6666): Ensure that this configuration cooperates with
     // restricted allocatable registers above, i.e. that we guarantee a
     // restricted configuration cannot allocate kRootRegister on ia32.
-    static_assert(kRootRegister == kSpeculationPoisonRegister,
-                  "The following checks assume root equals poison register");
-    CHECK(!FLAG_untrusted_code_mitigations);
     AllocateRegisters(RegisterConfiguration::PreserveRootIA32(),
                       call_descriptor, run_verifier);
+  }
 #else
+  } else if (data->info()->GetPoisoningMitigationLevel() !=
+             PoisoningMitigationLevel::kDontPoison) {
+    AllocateRegisters(RegisterConfiguration::Poisoning(), call_descriptor,
+                      run_verifier);
+  } else {
     AllocateRegisters(RegisterConfiguration::Default(), call_descriptor,
                       run_verifier);
-#endif  // defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
   }
+#endif  // V8_TARGET_ARCH_IA32

   // Verify the instruction sequence has the same hash in two stages.
   VerifyGeneratedCodeIsIdempotent();


@@ -4630,12 +4630,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
       args[pos++] = undefined_node;
     }

-    CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
 #ifdef V8_TARGET_ARCH_IA32
     // TODO(v8:6666): Remove kAllowCallThroughSlot and use a pc-relative
     // call instead once builtins are embedded in every build configuration.
-    flags = FLAG_embedded_builtins ? CallDescriptor::kAllowCallThroughSlot
-                                   : CallDescriptor::kNoFlags;
+    CallDescriptor::Flags flags = CallDescriptor::kAllowCallThroughSlot;
+#else
+    CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
 #endif
     auto call_descriptor = Linkage::GetStubCallDescriptor(
         mcgraph()->zone(), ArgumentsAdaptorDescriptor{}, 1 + wasm_count,


@@ -809,16 +809,9 @@ void Deoptimizer::DoComputeOutputFrames() {
     }
   }

-#if defined(V8_TARGET_ARCH_IA32)
-  constexpr bool kShouldInitializeRootRegister = FLAG_embedded_builtins;
-#else
-  constexpr bool kShouldInitializeRootRegister = true;
-#endif
-  if (kShouldInitializeRootRegister) {
-    FrameDescription* topmost = output_[count - 1];
-    topmost->GetRegisterValues()->SetRegister(kRootRegister.code(),
-                                              isolate()->isolate_root());
-  }
+  FrameDescription* topmost = output_[count - 1];
+  topmost->GetRegisterValues()->SetRegister(kRootRegister.code(),
+                                            isolate()->isolate_root());

   // Print some helpful diagnostic information.
   if (trace_scope_ != nullptr) {
@@ -1484,7 +1477,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
   const bool must_handle_result =
       !is_topmost || deopt_kind_ == DeoptimizeKind::kLazy;

-#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
+#ifdef V8_TARGET_ARCH_IA32
   // TODO(v8:6666): Fold into Default config once root is fully supported.
   const RegisterConfiguration* config(
       RegisterConfiguration::PreserveRootIA32());


@@ -41,15 +41,9 @@ MacroAssembler::MacroAssembler(Isolate* isolate,
     code_object_ = Handle<HeapObject>::New(
         *isolate->factory()->NewSelfReferenceMarker(), isolate);
   }
-  // TODO(jgruber, v8:6666): Remove once root register is always available.
-  set_root_array_available(FLAG_embedded_builtins);
 }

 void TurboAssembler::InitializeRootRegister() {
-  // TODO(v8:6666): Initialize unconditionally once poisoning support has been
-  // removed.
-  if (!FLAG_embedded_builtins) return;
   ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
   Move(kRootRegister, Immediate(isolate_root));
 }
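
With InitializeRootRegister now unconditional, every entry into generated ia32 code can address the isolate's root list relative to ebx. Roughly what that buys, in V8's assembler-macro style (a sketch; the offset helper name is illustrative, the first two lines mirror the code above):

```cpp
// Executed once on entry to generated code:
__ Move(kRootRegister, Immediate(ExternalReference::isolate_root(isolate())));

// Afterwards, heap roots and external references are reachable as fixed
// offsets from ebx instead of as embedded absolute addresses, which is what
// keeps embedded builtins position-independent:
__ mov(eax, Operand(kRootRegister, RootRegisterOffsetFor(root_index)));
```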
@@ -1999,10 +1993,6 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
   }
 }

-void TurboAssembler::ResetSpeculationPoisonRegister() {
-  mov(kSpeculationPoisonRegister, Immediate(-1));
-}
-
 }  // namespace internal
 }  // namespace v8


@@ -21,7 +21,6 @@ constexpr Register kReturnRegister2 = edi;
 constexpr Register kJSFunctionRegister = edi;
 constexpr Register kContextRegister = esi;
 constexpr Register kAllocateSizeRegister = edx;
-constexpr Register kSpeculationPoisonRegister = ebx;
 constexpr Register kInterpreterAccumulatorRegister = eax;
 constexpr Register kInterpreterBytecodeOffsetRegister = edx;
 constexpr Register kInterpreterBytecodeArrayRegister = edi;
@@ -49,6 +48,9 @@ constexpr Register kWasmCompileLazyFuncIndexRegister = edi;
 constexpr Register kRootRegister = ebx;

+// TODO(860429): Remove remaining poisoning infrastructure on ia32.
+constexpr Register kSpeculationPoisonRegister = no_reg;
+
 // Convenience for platform-independent signatures. We do not normally
 // distinguish memory operands from other operands on ia32.
 typedef Operand MemOperand;
@@ -454,7 +456,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   // This is an alternative to embedding the {CodeObject} handle as a reference.
   void ComputeCodeStartAddress(Register dst);

-  void ResetSpeculationPoisonRegister();
+  // TODO(860429): Remove remaining poisoning infrastructure on ia32.
+  void ResetSpeculationPoisonRegister() { UNREACHABLE(); }
 };

 // MacroAssembler implements a collection of frequently used macros.


@@ -166,7 +166,7 @@ static base::LazyInstance<ArchDefaultPoisoningRegisterConfiguration,
                           PoisoningRegisterConfigurationInitializer>::type
     kDefaultPoisoningRegisterConfiguration = LAZY_INSTANCE_INITIALIZER;

-#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
+#ifdef V8_TARGET_ARCH_IA32
 // Allocatable registers with the root register removed.
 // TODO(v8:6666): Once all builtins have been migrated, we could remove this
 // configuration and remove kRootRegister from ALLOCATABLE_GENERAL_REGISTERS
@@ -213,7 +213,7 @@ struct PreserveRootIA32RegisterConfigurationInitializer {
 static base::LazyInstance<ArchPreserveRootIA32RegisterConfiguration,
                           PreserveRootIA32RegisterConfigurationInitializer>::
     type kPreserveRootIA32RegisterConfiguration = LAZY_INSTANCE_INITIALIZER;
-#endif  // defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
+#endif  // V8_TARGET_ARCH_IA32

 // RestrictedRegisterConfiguration uses the subset of allocatable general
 // registers the architecture supports, which results in generating assembly
@@ -267,11 +267,11 @@ const RegisterConfiguration* RegisterConfiguration::Poisoning() {
   return &kDefaultPoisoningRegisterConfiguration.Get();
 }

-#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
+#ifdef V8_TARGET_ARCH_IA32
 const RegisterConfiguration* RegisterConfiguration::PreserveRootIA32() {
   return &kPreserveRootIA32RegisterConfiguration.Get();
 }
-#endif  // defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
+#endif  // V8_TARGET_ARCH_IA32

 const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters(
     RegList registers) {
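
PreserveRootIA32 is conceptually just the default configuration with ebx struck from the allocatable set. A standalone illustration of that filtering (not the actual initializer above):

```cpp
#include <algorithm>
#include <vector>

// Returns the allocatable general-register codes minus the root register's
// code, so the register allocator can never hand out ebx.
std::vector<int> AllocatableWithoutRoot(std::vector<int> codes,
                                        int root_code) {
  codes.erase(std::remove(codes.begin(), codes.end(), root_code),
              codes.end());
  return codes;
}
```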


@@ -51,7 +51,7 @@ class RandomNumberGenerator;
 namespace internal {

-#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
+#ifdef V8_TARGET_ARCH_IA32
 // TODO(v8:6666): Fold into Default config once root is fully supported.
 const auto GetRegConfig = RegisterConfiguration::PreserveRootIA32;
 #else


@@ -361,7 +361,7 @@ class TestEnvironment : public HandleAndZoneScope {
  public:
   // These constants may be tuned to experiment with different environments.

-#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
+#ifdef V8_TARGET_ARCH_IA32
   static constexpr int kGeneralRegisterCount = 3;
 #else
   static constexpr int kGeneralRegisterCount = 4;