[Ignition] [TurboFan] Generate speculation poison in code generator.

Moves generation of speculation poison to be based on the PC target vs the
actual PC being executed. The speculation poison is generated in the prologue
of the generated code if CompilationInfo::kGenerateSpeculationPoison is set.
The result is stored in a known register, which can then be read using the
SpeculationPoison machine node.

Currently we need to ensure the SpeculationPoison node is scheduled right after
the code prologue so that the poison register doesn't get clobbered. This is
currently not verified; however, its only use is in RawMachineAssembler, where
it is manually scheduled early.

The Ignition bytecode handlers are updated to use this speculation poison
rather than one generated by comparing the target bytecode.

BUG=chromium:798964

Change-Id: I2a3d0cfc694e88d7a8fe893282bd5082f693d5e2
Reviewed-on: https://chromium-review.googlesource.com/893160
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51229}
This commit is contained in:
Ross McIlroy 2018-02-11 19:17:27 +00:00 committed by Commit Bot
parent 29844bac3a
commit a021b6c42d
51 changed files with 362 additions and 172 deletions

View File

@ -300,8 +300,7 @@ void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister};
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}

View File

@ -20,11 +20,11 @@ constexpr Register kReturnRegister2 = r2;
constexpr Register kJSFunctionRegister = r1;
constexpr Register kContextRegister = r7;
constexpr Register kAllocateSizeRegister = r1;
constexpr Register kSpeculationPoisonRegister = r4;
constexpr Register kInterpreterAccumulatorRegister = r0;
constexpr Register kInterpreterBytecodeOffsetRegister = r5;
constexpr Register kInterpreterBytecodeArrayRegister = r6;
constexpr Register kInterpreterDispatchTableRegister = r8;
constexpr Register kInterpreterTargetBytecodeRegister = r4;
constexpr Register kJavaScriptCallArgCountRegister = r0;
constexpr Register kJavaScriptCallCodeStartRegister = r2;
constexpr Register kJavaScriptCallNewTargetRegister = r3;

View File

@ -311,8 +311,7 @@ void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister};
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}

View File

@ -47,11 +47,11 @@ namespace internal {
#define kJSFunctionRegister x1
#define kContextRegister cp
#define kAllocateSizeRegister x1
#define kSpeculationPoisonRegister x18
#define kInterpreterAccumulatorRegister x0
#define kInterpreterBytecodeOffsetRegister x19
#define kInterpreterBytecodeArrayRegister x20
#define kInterpreterDispatchTableRegister x21
#define kInterpreterTargetBytecodeRegister x18
#define kJavaScriptCallArgCountRegister x0
#define kJavaScriptCallCodeStartRegister x2
#define kJavaScriptCallNewTargetRegister x3

View File

@ -1015,13 +1015,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
__ ldrb(kInterpreterTargetBytecodeRegister,
MemOperand(kInterpreterBytecodeArrayRegister,
__ ldrb(r4, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ ldr(r1,
MemOperand(kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister, LSL, kPointerSizeLog2));
__ Call(r1);
__ ldr(
kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, r4, LSL, kPointerSizeLog2));
__ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@ -1221,15 +1220,14 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
__ ldrb(kInterpreterTargetBytecodeRegister,
MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ ldr(scratch,
MemOperand(kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister, LSL, kPointerSizeLog2));
__ Jump(scratch);
__ ldrb(scratch, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ ldr(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, scratch, LSL,
kPointerSizeLog2));
__ Jump(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {

View File

@ -1108,13 +1108,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
__ Ldrb(kInterpreterTargetBytecodeRegister,
MemOperand(kInterpreterBytecodeArrayRegister,
__ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ Mov(x1,
Operand(kInterpreterTargetBytecodeRegister, LSL, kPointerSizeLog2));
__ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
__ Call(ip0);
__ Mov(x1, Operand(x18, LSL, kPointerSizeLog2));
__ Ldr(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, x1));
__ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@ -1342,13 +1341,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
__ Ldrb(kInterpreterTargetBytecodeRegister,
MemOperand(kInterpreterBytecodeArrayRegister,
__ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ Mov(x1,
Operand(kInterpreterTargetBytecodeRegister, LSL, kPointerSizeLog2));
__ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
__ Jump(ip0);
__ Mov(x1, Operand(x18, LSL, kPointerSizeLog2));
__ Ldr(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, x1));
__ Jump(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {

View File

@ -938,13 +938,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(kInterpreterDispatchTableRegister,
Immediate(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
__ movzx_b(kInterpreterTargetBytecodeRegister,
Operand(kInterpreterBytecodeArrayRegister,
__ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ mov(edx,
Operand(kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister, times_pointer_size, 0));
__ call(edx);
__ mov(
kJavaScriptCallCodeStartRegister,
Operand(kInterpreterDispatchTableRegister, ebx, times_pointer_size, 0));
__ call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@ -962,7 +961,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, ebx, edx,
kInterpreterBytecodeOffsetRegister, ebx, ecx,
&do_return);
__ jmp(&do_dispatch);
@ -1268,13 +1267,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
__ movzx_b(kInterpreterTargetBytecodeRegister,
Operand(kInterpreterBytecodeArrayRegister,
__ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ mov(edx,
Operand(kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister, times_pointer_size, 0));
__ jmp(edx);
__ mov(
kJavaScriptCallCodeStartRegister,
Operand(kInterpreterDispatchTableRegister, ebx, times_pointer_size, 0));
__ jmp(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@ -1292,7 +1290,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Advance to the next bytecode.
Label if_return;
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, ebx, edx,
kInterpreterBytecodeOffsetRegister, ebx, ecx,
&if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.

View File

@ -1003,11 +1003,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
masm->isolate())));
__ Addu(a0, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ lbu(kInterpreterTargetBytecodeRegister, MemOperand(a0));
__ Lsa(at, kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister, kPointerSizeLog2);
__ lw(at, MemOperand(at));
__ Call(at);
__ lbu(t3, MemOperand(a0));
__ Lsa(at, kInterpreterDispatchTableRegister, t3, kPointerSizeLog2);
__ lw(kJavaScriptCallCodeStartRegister, MemOperand(at));
__ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@ -1227,11 +1226,10 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Dispatch to the target bytecode.
__ Addu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ lbu(kInterpreterTargetBytecodeRegister, MemOperand(a1));
__ Lsa(a1, kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister, kPointerSizeLog2);
__ lw(a1, MemOperand(a1));
__ Jump(a1);
__ lbu(t3, MemOperand(a1));
__ Lsa(a1, kInterpreterDispatchTableRegister, t3, kPointerSizeLog2);
__ lw(kJavaScriptCallCodeStartRegister, MemOperand(a1));
__ Jump(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {

View File

@ -1000,11 +1000,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
masm->isolate())));
__ Daddu(a0, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ Lbu(kInterpreterTargetBytecodeRegister, MemOperand(a0));
__ Dlsa(at, kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister, kPointerSizeLog2);
__ Ld(at, MemOperand(at));
__ Call(at);
__ Lbu(a7, MemOperand(a0));
__ Dlsa(at, kInterpreterDispatchTableRegister, a7, kPointerSizeLog2);
__ Ld(kJavaScriptCallCodeStartRegister, MemOperand(at));
__ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@ -1225,11 +1224,10 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Dispatch to the target bytecode.
__ Daddu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ Lbu(kInterpreterTargetBytecodeRegister, MemOperand(a1));
__ Dlsa(a1, kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister, kPointerSizeLog2);
__ Ld(a1, MemOperand(a1));
__ Jump(a1);
__ Lbu(a7, MemOperand(a1));
__ Dlsa(a1, kInterpreterDispatchTableRegister, a7, kPointerSizeLog2);
__ Ld(kJavaScriptCallCodeStartRegister, MemOperand(a1));
__ Jump(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {

View File

@ -1006,13 +1006,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Move(
kInterpreterDispatchTableRegister,
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
__ movzxbp(kInterpreterTargetBytecodeRegister,
Operand(kInterpreterBytecodeArrayRegister,
__ movzxbp(r11, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ movp(rbx,
Operand(kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister, times_pointer_size, 0));
__ call(rbx);
__ movp(
kJavaScriptCallCodeStartRegister,
Operand(kInterpreterDispatchTableRegister, r11, times_pointer_size, 0));
__ call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@ -1239,13 +1238,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
__ movzxbp(kInterpreterTargetBytecodeRegister,
Operand(kInterpreterBytecodeArrayRegister,
__ movzxbp(r11, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ movp(rbx,
Operand(kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister, times_pointer_size, 0));
__ jmp(rbx);
__ movp(
kJavaScriptCallCodeStartRegister,
Operand(kInterpreterDispatchTableRegister, r11, times_pointer_size, 0));
__ jmp(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {

View File

@ -59,7 +59,11 @@ CompilationInfo::CompilationInfo(Zone* zone, Isolate* isolate,
CompilationInfo::CompilationInfo(Vector<const char> debug_name, Zone* zone,
Code::Kind code_kind)
: CompilationInfo(debug_name, code_kind, STUB, zone) {}
: CompilationInfo(debug_name, code_kind, STUB, zone) {
if (code_kind == Code::BYTECODE_HANDLER && has_untrusted_code_mitigations()) {
SetFlag(CompilationInfo::kGenerateSpeculationPoison);
}
}
CompilationInfo::CompilationInfo(Vector<const char> debug_name,
Code::Kind code_kind, Mode mode, Zone* zone)

View File

@ -52,6 +52,7 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
kLoopPeelingEnabled = 1 << 10,
kUntrustedCodeMitigations = 1 << 11,
kSwitchJumpTableEnabled = 1 << 12,
kGenerateSpeculationPoison = 1 << 13,
};
// TODO(mtrofin): investigate if this might be generalized outside wasm, with
@ -174,6 +175,12 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
return GetFlag(kSwitchJumpTableEnabled);
}
// Returns true if the code generator should emit a speculation poison mask
// in the code prologue (kGenerateSpeculationPoison flag).
// The flag is only ever valid together with the untrusted-code-mitigations
// flag; the DCHECK enforces that invariant.
bool is_speculation_poison_enabled() const {
bool enabled = GetFlag(kGenerateSpeculationPoison);
DCHECK_IMPLIES(enabled, has_untrusted_code_mitigations());
return enabled;
}
// Code getters and setters.
void SetCode(Handle<Code> code) { code_ = code; }

View File

@ -608,6 +608,31 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Jump(code, RelocInfo::CODE_TARGET, ne);
}
// Generates the speculation poison mask in kSpeculationPoisonRegister: all
// bits set when the PC we are actually executing at matches the expected
// code start address passed in kJavaScriptCallCodeStartRegister, all bits
// cleared when we are speculatively executing at the wrong PC.
// Clobbers kJavaScriptCallCodeStartRegister and one scratch register.
void CodeGenerator::GenerateSpeculationPoison() {
  UseScratchRegisterScope temps(tasm());
  Register scratch = temps.Acquire();

  // We can use the register pc - 8 for the address of the current
  // instruction: on ARM, reading pc yields the instruction address plus
  // kPcLoadDelta. Subtract both the delta and our current offset into the
  // code object to recover the code start address.
  // (Fix: the previous add(scratch, pc, pc_offset - kPcLoadDelta) produced
  // code_start + 2 * pc_offset, not the code start.)
  int pc_offset = __ pc_offset();
  __ sub(scratch, pc, Operand(pc_offset + TurboAssembler::kPcLoadDelta));

  // Calculate a mask which has all bits set in the normal case, but has all
  // bits cleared if we are speculatively executing the wrong PC.
  //    difference = (current - expected) | (expected - current)
  //    poison = ~(difference >> (kBitsPerPointer - 1))
  __ mov(kSpeculationPoisonRegister, scratch);
  __ sub(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
         kJavaScriptCallCodeStartRegister);
  __ sub(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
         scratch);
  __ orr(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
         kJavaScriptCallCodeStartRegister);
  // A non-zero difference in either direction sets the sign bit of the OR;
  // the arithmetic shift replicates it across the whole register, and the
  // final invert yields all ones (no poison) only on an exact match.
  __ asr(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
         Operand(kBitsPerPointer - 1));
  __ mvn(kSpeculationPoisonRegister, Operand(kSpeculationPoisonRegister));
}
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {

View File

@ -563,6 +563,30 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Bind(&not_deoptimized);
}
// Generates the speculation poison mask in kSpeculationPoisonRegister: all
// bits set when the PC we are actually executing at matches the expected
// code start address in kJavaScriptCallCodeStartRegister, all bits cleared
// when we are speculatively executing at the wrong PC.
// NOTE(review): clobbers kJavaScriptCallCodeStartRegister and one scratch.
void CodeGenerator::GenerateSpeculationPoison() {
UseScratchRegisterScope temps(tasm());
Register scratch = temps.AcquireX();
// We can use adr to load a pc relative location.
// adr with displacement -pc_offset yields the code start address, since we
// are pc_offset bytes into the code object here.
int pc_offset = __ pc_offset();
__ adr(scratch, -pc_offset);
// Calculate a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
// difference = (current - expected) | (expected - current)
// poison = ~(difference >> (kBitsPerPointer - 1))
__ Mov(kSpeculationPoisonRegister, scratch);
__ Sub(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
__ Sub(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
scratch);
__ Orr(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
// A non-zero difference in either direction sets the sign bit of the OR;
// the arithmetic shift replicates it across the register, and the invert
// yields all ones (no poison) only on an exact match.
__ Asr(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kBitsPerPointer - 1);
__ Mvn(kSpeculationPoisonRegister, Operand(kSpeculationPoisonRegister));
}
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {

View File

@ -58,6 +58,8 @@ CodeAssemblerState::CodeAssemblerState(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
Code::Kind kind, const char* name, size_t result_size, uint32_t stub_key,
int32_t builtin_index)
// TODO(rmcilroy): Should we use Linkage::GetBytecodeDispatchDescriptor for
// bytecode handlers?
: CodeAssemblerState(
isolate, zone,
Linkage::GetStubCallDescriptor(
@ -422,6 +424,10 @@ Node* CodeAssembler::LoadStackPointer() {
return raw_assembler()->LoadStackPointer();
}
// Returns a node that evaluates to the speculation poison mask; simply
// forwards to the underlying RawMachineAssembler.
Node* CodeAssembler::SpeculationPoison() {
return raw_assembler()->SpeculationPoison();
}
#define DEFINE_CODE_ASSEMBLER_BINARY_OP(name, ResType, Arg1Type, Arg2Type) \
TNode<ResType> CodeAssembler::name(SloppyTNode<Arg1Type> a, \
SloppyTNode<Arg2Type> b) { \
@ -1141,7 +1147,7 @@ Node* CodeAssembler::TailCallBytecodeDispatch(
// CSA-generated code
template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallBytecodeDispatch(
const CallInterfaceDescriptor& descriptor, Node* target, Node*, Node*,
Node*, Node*, Node*);
Node*, Node*);
Node* CodeAssembler::CallCFunctionN(Signature<MachineType>* signature,
int input_count, Node* const* inputs) {

View File

@ -710,6 +710,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Access to the stack pointer
Node* LoadStackPointer();
// Poison mask for speculation.
Node* SpeculationPoison();
// Load raw memory location.
Node* Load(MachineType rep, Node* base);
template <class Type>

View File

@ -148,11 +148,14 @@ void CodeGenerator::AssembleCode() {
ProfileEntryHookStub::MaybeCallEntryHookDelayed(tasm(), zone());
}
// TODO(jupvfranco): This should be the first thing in the code,
// or otherwise MaybeCallEntryHookDelayed may happen twice (for
// optimized and deoptimized code).
// We want to bailout only from JS functions, which are the only ones
// that are optimized.
if (info->is_speculation_poison_enabled()) {
GenerateSpeculationPoison();
}
// TODO(jupvfranco): This should be the first thing in the code after
// generating speculation poison, or otherwise MaybeCallEntryHookDelayed may
// happen twice (for optimized and deoptimized code). We want to bailout only
// from JS functions, which are the only ones that are optimized.
if (info->IsOptimizing()) {
DCHECK(linkage()->GetIncomingDescriptor()->IsJSFunctionCall());
BailoutIfDeoptimized();

View File

@ -185,6 +185,10 @@ class CodeGenerator final : public GapResolver::Assembler {
// from the JS functions referring it.
void BailoutIfDeoptimized();
// Generates a mask which can be used to poison values when we detect
// the code is executing speculatively.
void GenerateSpeculationPoison();
// Generates an architecture-specific, descriptor-specific prologue
// to set up a stack frame.
void AssembleConstructFrame();

View File

@ -504,6 +504,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
if (FLAG_debug_code) {
__ push(eax); // Push eax so we can use it as a scratch register.
// Check that {kJavaScriptCallCodeStartRegister} is correct.
Label current;
__ call(&current);
@ -512,10 +513,11 @@ void CodeGenerator::BailoutIfDeoptimized() {
// In order to get the address of the current instruction, we first need
// to use a call and then use a pop, thus pushing the return address to
// the stack and then popping it into the register.
__ pop(ebx);
__ sub(ebx, Immediate(pc));
__ cmp(ebx, kJavaScriptCallCodeStartRegister);
__ pop(eax);
__ sub(eax, Immediate(pc));
__ cmp(eax, kJavaScriptCallCodeStartRegister);
__ Assert(equal, AbortReason::kWrongFunctionCodeStart);
__ pop(eax); // Restore eax.
}
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
@ -527,6 +529,33 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ j(not_zero, code, RelocInfo::CODE_TARGET);
}
// Generates the speculation poison mask in kSpeculationPoisonRegister: all
// bits set when the PC we are actually executing at matches the expected
// code start address in kJavaScriptCallCodeStartRegister, all bits cleared
// when we are speculatively executing at the wrong PC.
// NOTE(review): clobbers kJavaScriptCallCodeStartRegister; eax is saved
// and restored around its use as a scratch register.
void CodeGenerator::GenerateSpeculationPoison() {
__ push(eax); // Push eax so we can use it as a scratch register.
// In order to get the address of the current instruction, we first need
// to use a call and then use a pop, thus pushing the return address to
// the stack and then popping it into the register.
Label current;
__ call(&current);
int pc = __ pc_offset();
__ bind(&current);
__ pop(eax);
// eax held the address of the `current` label; subtracting our offset
// into the code object leaves the code start address.
__ sub(eax, Immediate(pc));
// Calculate a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
// difference = (current - expected) | (expected - current)
// poison = ~(difference >> (kBitsPerPointer - 1))
__ mov(kSpeculationPoisonRegister, eax);
__ sub(kSpeculationPoisonRegister, kJavaScriptCallCodeStartRegister);
__ sub(kJavaScriptCallCodeStartRegister, eax);
__ or_(kSpeculationPoisonRegister, kJavaScriptCallCodeStartRegister);
// A non-zero difference in either direction sets the sign bit of the OR;
// the arithmetic shift replicates it across the register, and the final
// not_ yields all ones (no poison) only on an exact match.
__ sar(kSpeculationPoisonRegister, kBitsPerPointer - 1);
__ not_(kSpeculationPoisonRegister);
__ pop(eax); // Restore eax.
}
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {

View File

@ -25,6 +25,7 @@ InstructionSelector::InstructionSelector(
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
EnableSwitchJumpTable enable_switch_jump_table,
EnableSpeculationPoison enable_speculation_poison,
SourcePositionMode source_position_mode, Features features,
EnableScheduling enable_scheduling,
EnableSerialization enable_serialization)
@ -47,6 +48,7 @@ InstructionSelector::InstructionSelector(
enable_scheduling_(enable_scheduling),
enable_serialization_(enable_serialization),
enable_switch_jump_table_(enable_switch_jump_table),
enable_speculation_poison_(enable_speculation_poison),
frame_(frame),
instruction_selection_failed_(false) {
instructions_.reserve(node_count);
@ -760,11 +762,14 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
Node* callee = call->InputAt(0);
bool call_code_immediate = (flags & kCallCodeImmediate) != 0;
bool call_address_immediate = (flags & kCallAddressImmediate) != 0;
bool call_use_fixed_target_reg = (flags & kCallFixedTargetRegister) != 0;
switch (buffer->descriptor->kind()) {
case CallDescriptor::kCallCodeObject:
buffer->instruction_args.push_back(
(call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
? g.UseImmediate(callee)
: call_use_fixed_target_reg
? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
: g.UseRegister(callee));
break;
case CallDescriptor::kCallAddress:
@ -772,6 +777,8 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
(call_address_immediate &&
callee->opcode() == IrOpcode::kExternalConstant)
? g.UseImmediate(callee)
: call_use_fixed_target_reg
? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
: g.UseRegister(callee));
break;
case CallDescriptor::kCallWasmFunction:
@ -780,6 +787,8 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
(callee->opcode() == IrOpcode::kRelocatableInt64Constant ||
callee->opcode() == IrOpcode::kRelocatableInt32Constant))
? g.UseImmediate(callee)
: call_use_fixed_target_reg
? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
: g.UseRegister(callee));
break;
case CallDescriptor::kCallJSFunction:
@ -1472,6 +1481,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node);
case IrOpcode::kFloat64InsertHighWord32:
return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
case IrOpcode::kSpeculationPoison:
return VisitSpeculationPoison(node);
case IrOpcode::kStackSlot:
return VisitStackSlot(node);
case IrOpcode::kLoadStackPointer:
@ -1792,6 +1803,14 @@ void InstructionSelector::VisitNode(Node* node) {
}
}
// Materializes the speculation poison value for this node by defining it in
// the fixed kSpeculationPoisonRegister, where the code generator's prologue
// left it. Emits kArchNop — no actual instruction is needed, only the fixed
// register constraint on the output. Only legal when the selector was
// created with kEnableSpeculationPoison (CHECKed below).
void InstructionSelector::VisitSpeculationPoison(Node* node) {
CHECK(enable_speculation_poison_ == kEnableSpeculationPoison);
OperandGenerator g(this);
Emit(kArchNop, g.DefineAsLocation(node, LinkageLocation::ForRegister(
kSpeculationPoisonRegister.code(),
MachineType::UintPtr())));
}
void InstructionSelector::VisitLoadStackPointer(Node* node) {
OperandGenerator g(this);
Emit(kArchStackPointer, g.DefineAsRegister(node));
@ -2436,6 +2455,9 @@ void InstructionSelector::VisitTailCall(Node* node) {
if (IsTailCallAddressImmediate()) {
flags |= kCallAddressImmediate;
}
if (callee->flags() & CallDescriptor::kFixedTargetRegister) {
flags |= kCallFixedTargetRegister;
}
InitializeCallBuffer(node, &buffer, flags, true, stack_param_delta);
// Select the appropriate opcode based on the call type.

View File

@ -55,12 +55,17 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
kDisableSwitchJumpTable,
kEnableSwitchJumpTable
};
enum EnableSpeculationPoison {
kDisableSpeculationPoison,
kEnableSpeculationPoison
};
InstructionSelector(
Zone* zone, size_t node_count, Linkage* linkage,
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
EnableSwitchJumpTable enable_switch_jump_table,
EnableSpeculationPoison enable_speculation_poison,
SourcePositionMode source_position_mode = kCallSourcePositions,
Features features = SupportedFeatures(),
EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
@ -282,7 +287,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
enum CallBufferFlag {
kCallCodeImmediate = 1u << 0,
kCallAddressImmediate = 1u << 1,
kCallTail = 1u << 2
kCallTail = 1u << 2,
kCallFixedTargetRegister = 1u << 3,
};
typedef base::Flags<CallBufferFlag> CallBufferFlags;
@ -453,6 +459,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
EnableScheduling enable_scheduling_;
EnableSerialization enable_serialization_;
EnableSwitchJumpTable enable_switch_jump_table_;
EnableSpeculationPoison enable_speculation_poison_;
Frame* frame_;
bool instruction_selection_failed_;
};

View File

@ -461,6 +461,8 @@ CallDescriptor* Linkage::GetBytecodeDispatchCallDescriptor(
// The target for interpreter dispatches is a code entry address.
MachineType target_type = MachineType::Pointer();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
const CallDescriptor::Flags kFlags =
CallDescriptor::kCanUseRoots | CallDescriptor::kFixedTargetRegister;
return new (zone) CallDescriptor( // --
CallDescriptor::kCallAddress, // kind
target_type, // target MachineType
@ -470,7 +472,7 @@ CallDescriptor* Linkage::GetBytecodeDispatchCallDescriptor(
Operator::kNoProperties, // properties
kNoCalleeSaved, // callee-saved registers
kNoCalleeSaved, // callee-saved fp
CallDescriptor::kCanUseRoots, // flags
kFlags, // flags
descriptor.DebugName(isolate));
}

View File

@ -184,7 +184,10 @@ class V8_EXPORT_PRIVATE CallDescriptor final
// Push argument count as part of function prologue.
kPushArgumentCount = 1u << 5,
// Use retpoline for this call if indirect.
kRetpoline = 1u << 6
kRetpoline = 1u << 6,
// Use the kJavaScriptCallCodeStartRegister (fixed) register for the
// indirect target address when calling.
kFixedTargetRegister = 1u << 7
};
typedef base::Flags<Flag> Flags;

View File

@ -119,6 +119,7 @@ class MachineRepresentationInferrer {
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
case IrOpcode::kSpeculationPoison:
representation_vector_[node->id()] =
MachineType::PointerRepresentation();
break;

View File

@ -224,6 +224,7 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1) \
V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1) \
V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1) \
V(SpeculationPoison, Operator::kNoProperties, 0, 0, 1) \
V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1) \
V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1) \
V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1) \

View File

@ -599,6 +599,10 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* StackSlot(int size, int alignment = 0);
const Operator* StackSlot(MachineRepresentation rep, int alignment = 0);
// Returns a value which can be used as a mask to poison values when executing
// speculatively.
const Operator* SpeculationPoison();
// Access to the machine stack.
const Operator* LoadStackPointer();
const Operator* LoadFramePointer();

View File

@ -653,6 +653,40 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Jump(code, RelocInfo::CODE_TARGET, ne, a2, Operand(zero_reg));
}
// Generates the speculation poison mask in kSpeculationPoisonRegister: all
// bits set when the PC we are actually executing at matches the expected
// code start address in kJavaScriptCallCodeStartRegister, all bits cleared
// when we are speculatively executing at the wrong PC.
// NOTE(review): clobbers kJavaScriptCallCodeStartRegister and at; ra is
// saved and restored around its use by bal.
void CodeGenerator::GenerateSpeculationPoison() {
// This push on ra and the pop below together ensure that we restore the
// register ra, which is needed while computing speculation poison.
__ push(ra);
// The bal instruction puts the address of the current instruction into
// the return address (ra) register, which we can use later on.
Label current;
__ bal(&current);
__ nop();
int pc = __ pc_offset();
__ bind(&current);
// at = ra - pc_offset-of-`current` = code start address.
__ li(at, pc);
__ subu(at, ra, at);
// Calculate a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
// difference = (current - expected) | (expected - current)
// poison = ~(difference >> (kBitsPerPointer - 1))
__ Move(kSpeculationPoisonRegister, at);
__ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
__ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
at);
__ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
// A non-zero difference in either direction sets the sign bit of the OR;
// the arithmetic shift replicates it across the register, and nor(x, x)
// inverts, yielding all ones (no poison) only on an exact match.
__ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kBitsPerPointer - 1);
__ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kSpeculationPoisonRegister);
__ pop(ra); // Restore ra
}
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {

View File

@ -669,6 +669,40 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Jump(code, RelocInfo::CODE_TARGET, ne, a2, Operand(zero_reg));
}
void CodeGenerator::GenerateSpeculationPoison() {
// Computes kSpeculationPoisonRegister as a mask: all bits set when the PC
// actually being executed matches the expected code start held in
// kJavaScriptCallCodeStartRegister, all bits cleared when we are
// speculatively executing at the wrong target. Clobbers at and
// kJavaScriptCallCodeStartRegister.
// This push on ra and the pop below together ensure that we restore the
// register ra, which is needed while computing speculation poison.
__ push(ra);
// The bal instruction puts the address of the current instruction into
// the return address (ra) register, which we can use later on.
Label current;
__ bal(&current);
__ nop();
// pc_offset() is taken right where the "current" label is bound below, so
// it is the offset of the bal's return address within this code object.
int pc = __ pc_offset();
__ bind(&current);
// at = actual code start = (runtime address in ra) - (its known offset).
__ li(at, Operand(pc));
__ Dsubu(at, ra, at);
// Calculate a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
// difference = (current - expected) | (expected - current)
// poison = ~(difference >> (kBitsPerPointer - 1))
__ Move(kSpeculationPoisonRegister, at);
// NOTE(review): the ops below are the 32-bit subu/sra while the code-start
// computation above uses 64-bit Dsubu, and kBitsPerPointer - 1 here is 63,
// which exceeds sra's 5-bit shift field — confirm these should be the
// 64-bit variants (Dsubu / dsra32) on mips64.
__ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
__ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
at);
__ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
// A non-zero difference has the sign bit set in at least one of the two
// subtractions, so the arithmetic shift produces all ones and the
// nor-with-self (~x) clears the poison to 0; a zero difference yields a
// poison of all ones (no masking on the correctly-predicted path).
__ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kBitsPerPointer - 1);
__ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kSpeculationPoisonRegister);
__ pop(ra); // Restore ra
}
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {

View File

@ -600,6 +600,7 @@
V(Float64ExtractHighWord32) \
V(Float64InsertLowWord32) \
V(Float64InsertHighWord32) \
V(SpeculationPoison) \
V(LoadStackPointer) \
V(LoadFramePointer) \
V(LoadParentFramePointer) \

View File

@ -1535,6 +1535,9 @@ struct InstructionSelectionPhase {
data->info()->switch_jump_table_enabled()
? InstructionSelector::kEnableSwitchJumpTable
: InstructionSelector::kDisableSwitchJumpTable,
data->info()->is_speculation_poison_enabled()
? InstructionSelector::kEnableSpeculationPoison
: InstructionSelector::kDisableSpeculationPoison,
data->info()->is_source_positions_enabled()
? InstructionSelector::kAllSourcePositions
: InstructionSelector::kCallSourcePositions,

View File

@ -744,6 +744,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* StringConstant(const char* string) {
return HeapConstant(isolate()->factory()->InternalizeUtf8String(string));
}
// Returns the speculation poison mask generated by the code prologue.
// The node is attached to the graph's start node; per the commit message
// it must be scheduled right after the prologue so the poison register is
// read before being clobbered — this early placement is done manually here
// and is not yet verified by the scheduler.
Node* SpeculationPoison() {
return AddNode(machine()->SpeculationPoison(), graph()->start());
}
// Call a given call descriptor and the given arguments.
// The call target is passed as part of the {inputs} array.
@ -905,6 +908,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
CommonOperatorBuilder common_;
CallDescriptor* call_descriptor_;
NodeVector parameters_;
Node* speculation_poison_;
BasicBlock* current_block_;
DISALLOW_COPY_AND_ASSIGN(RawMachineAssembler);

View File

@ -1646,6 +1646,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kWord32PairShl:
case IrOpcode::kWord32PairShr:
case IrOpcode::kWord32PairSar:
case IrOpcode::kSpeculationPoison:
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:

View File

@ -591,14 +591,34 @@ void CodeGenerator::BailoutIfDeoptimized() {
}
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
__ movp(rcx, Operand(kJavaScriptCallCodeStartRegister, offset));
__ testl(FieldOperand(rcx, CodeDataContainer::kKindSpecificFlagsOffset),
__ movp(rbx, Operand(kJavaScriptCallCodeStartRegister, offset));
__ testl(FieldOperand(rbx, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
__ j(not_zero, code, RelocInfo::CODE_TARGET);
}
void CodeGenerator::GenerateSpeculationPoison() {
// Computes kSpeculationPoisonRegister as a mask: all bits set when the PC
// actually being executed matches the expected code start held in
// kJavaScriptCallCodeStartRegister, all bits cleared when we are
// speculatively executing at the wrong target. Clobbers rbx and
// kJavaScriptCallCodeStartRegister.
// Calculate a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
// difference = (current - expected) | (expected - current)
// poison = ~(difference >> (kBitsPerPointer - 1))
Label current;
__ bind(&current);
// pc is the offset of the "current" label within this code object; leaq
// materializes the label's absolute runtime address into rbx.
int pc = __ pc_offset();
__ leaq(rbx, Operand(&current));
if (pc != 0) {
// rbx = actual code start = label address - label offset.
__ subq(rbx, Immediate(pc));
}
__ movp(kSpeculationPoisonRegister, rbx);
__ subq(kSpeculationPoisonRegister, kJavaScriptCallCodeStartRegister);
__ subq(kJavaScriptCallCodeStartRegister, rbx);
__ orq(kSpeculationPoisonRegister, kJavaScriptCallCodeStartRegister);
// A non-zero difference has the sign bit set in at least one of the two
// subtractions, so the arithmetic shift yields all ones and notq clears
// the poison to 0; a zero difference yields a poison of all ones.
__ sarq(kSpeculationPoisonRegister, Immediate(kBitsPerPointer - 1));
__ notq(kSpeculationPoisonRegister);
}
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@ -698,6 +718,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTailCallAddress: {
CHECK(!HasImmediateInput(instr, 0));
Register reg = i.InputRegister(0);
if (HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister)) {
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
DCHECK_EQ(rcx, reg);
}
if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {

View File

@ -296,8 +296,7 @@ void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister};
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}

View File

@ -20,11 +20,11 @@ constexpr Register kReturnRegister2 = edi;
constexpr Register kJSFunctionRegister = edi;
constexpr Register kContextRegister = esi;
constexpr Register kAllocateSizeRegister = edx;
constexpr Register kSpeculationPoisonRegister = ebx;
constexpr Register kInterpreterAccumulatorRegister = eax;
constexpr Register kInterpreterBytecodeOffsetRegister = ecx;
constexpr Register kInterpreterBytecodeOffsetRegister = edx;
constexpr Register kInterpreterBytecodeArrayRegister = edi;
constexpr Register kInterpreterDispatchTableRegister = esi;
constexpr Register kInterpreterTargetBytecodeRegister = ebx;
constexpr Register kJavaScriptCallArgCountRegister = eax;
constexpr Register kJavaScriptCallCodeStartRegister = ecx;
constexpr Register kJavaScriptCallNewTargetRegister = edx;

View File

@ -593,11 +593,10 @@ void ApiCallbackDescriptor::InitializePlatformIndependent(
void InterpreterDispatchDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kAccumulator, kBytecodeOffset, kBytecodeArray, kDispatchTable,
// kTargetBytecode
// kAccumulator, kBytecodeOffset, kBytecodeArray, kDispatchTable
MachineType machine_types[] = {
MachineType::AnyTagged(), MachineType::IntPtr(), MachineType::AnyTagged(),
MachineType::IntPtr(), MachineType::IntPtr()};
MachineType::IntPtr()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}

View File

@ -832,7 +832,7 @@ class V8_EXPORT_PRIVATE InterpreterDispatchDescriptor
: public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kAccumulator, kBytecodeOffset, kBytecodeArray,
kDispatchTable, kTargetBytecode)
kDispatchTable)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(InterpreterDispatchDescriptor,
CallInterfaceDescriptor)
};

View File

@ -48,10 +48,7 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
made_call_(false),
reloaded_frame_ptr_(false),
bytecode_array_valid_(true),
speculation_poison_(
FLAG_untrusted_code_mitigations
? GenerateSpeculationPoison(
Parameter(InterpreterDispatchDescriptor::kTargetBytecode))
speculation_poison_(FLAG_untrusted_code_mitigations ? SpeculationPoison()
: nullptr),
disable_stack_check_across_call_(false),
stack_pointer_before_call_(nullptr) {
@ -77,21 +74,6 @@ InterpreterAssembler::~InterpreterAssembler() {
UnregisterCallGenerationCallbacks();
}
// (Removed by this CL.) Pre-existing bytecode-based poison generation:
// derives the mask by comparing the dispatched bytecode index against this
// handler's expected bytecode, instead of comparing the actual vs expected
// PC in the code generator's prologue.
Node* InterpreterAssembler::GenerateSpeculationPoison(Node* current_bytecode) {
// Calculate a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong bytecode handler.
// difference = (current - expected) | (expected - current)
// poison = ~(difference >> (kBitsPerPointer - 1))
Node* expected_bytecode = IntPtrConstant(static_cast<int>(bytecode_));
Node* difference = WordOr(IntPtrSub(current_bytecode, expected_bytecode),
IntPtrSub(expected_bytecode, current_bytecode));
// Arithmetic shift of the sign bit gives 0 (equal) or -1 (mismatch);
// WordNot inverts that into the all-ones / all-zeros poison mask.
return WordNot(WordSar(difference, IntPtrConstant(kBitsPerPointer - 1)));
}
void InterpreterAssembler::DisableSpeculationPoisoning() {
speculation_poison_ = nullptr;
}
Node* InterpreterAssembler::PoisonOnSpeculationTagged(Node* value) {
if (speculation_poison_ == nullptr) return value;
return BitcastWordToTagged(
@ -1458,11 +1440,10 @@ Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
Node* handler_entry, Node* bytecode_offset, Node* target_bytecode) {
InterpreterDispatchDescriptor descriptor(isolate());
// Propagate speculation poisoning.
Node* poisoned_target_bytecode = PoisonOnSpeculationWord(target_bytecode);
Node* poisoned_handler_entry = PoisonOnSpeculationWord(handler_entry);
return TailCallBytecodeDispatch(
descriptor, handler_entry, GetAccumulatorUnchecked(), bytecode_offset,
BytecodeArrayTaggedPointer(), DispatchTableRawPointer(),
poisoned_target_bytecode);
descriptor, poisoned_handler_entry, GetAccumulatorUnchecked(),
bytecode_offset, BytecodeArrayTaggedPointer(), DispatchTableRawPointer());
}
void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {

View File

@ -266,9 +266,6 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Lazily deserializes the current bytecode's handler and tail-calls into it.
void DeserializeLazyAndDispatch();
// Disables poisoning on speculative execution.
void DisableSpeculationPoisoning();
private:
// Returns a tagged pointer to the current function's BytecodeArray object.
compiler::Node* BytecodeArrayTaggedPointer();
@ -292,9 +289,6 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
compiler::Node* LoadRegister(Node* reg_index);
void StoreRegister(compiler::Node* value, compiler::Node* reg_index);
// Generates a poison which can be used to mask values on speculative paths.
compiler::Node* GenerateSpeculationPoison(Node* current_bytecode);
// Poison |value| on speculative paths.
compiler::Node* PoisonOnSpeculationTagged(Node* value);
compiler::Node* PoisonOnSpeculationWord(Node* value);

View File

@ -3095,10 +3095,7 @@ class DeserializeLazyAssembler : public InterpreterAssembler {
explicit DeserializeLazyAssembler(compiler::CodeAssemblerState* state,
OperandScale operand_scale)
: InterpreterAssembler(state, kFakeBytecode, operand_scale) {
// Disable speculation poisoning for this handler since we use kFakeBytecode
DisableSpeculationPoisoning();
}
: InterpreterAssembler(state, kFakeBytecode, operand_scale) {}
static void Generate(compiler::CodeAssemblerState* state,
OperandScale operand_scale) {

View File

@ -291,8 +291,7 @@ void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister};
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}

View File

@ -19,11 +19,11 @@ constexpr Register kReturnRegister2 = a0;
constexpr Register kJSFunctionRegister = a1;
constexpr Register kContextRegister = s7;
constexpr Register kAllocateSizeRegister = a0;
constexpr Register kSpeculationPoisonRegister = t3;
constexpr Register kInterpreterAccumulatorRegister = v0;
constexpr Register kInterpreterBytecodeOffsetRegister = t4;
constexpr Register kInterpreterBytecodeArrayRegister = t5;
constexpr Register kInterpreterDispatchTableRegister = t6;
constexpr Register kInterpreterTargetBytecodeRegister = t3;
constexpr Register kJavaScriptCallArgCountRegister = a0;
constexpr Register kJavaScriptCallCodeStartRegister = a2;
constexpr Register kJavaScriptCallNewTargetRegister = a3;

View File

@ -291,8 +291,7 @@ void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister};
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}

View File

@ -19,11 +19,11 @@ constexpr Register kReturnRegister2 = a0;
constexpr Register kJSFunctionRegister = a1;
constexpr Register kContextRegister = s7;
constexpr Register kAllocateSizeRegister = a0;
constexpr Register kSpeculationPoisonRegister = a7;
constexpr Register kInterpreterAccumulatorRegister = v0;
constexpr Register kInterpreterBytecodeOffsetRegister = t0;
constexpr Register kInterpreterBytecodeArrayRegister = t1;
constexpr Register kInterpreterDispatchTableRegister = t2;
constexpr Register kInterpreterTargetBytecodeRegister = a7;
constexpr Register kJavaScriptCallArgCountRegister = a0;
constexpr Register kJavaScriptCallCodeStartRegister = a2;
constexpr Register kJavaScriptCallNewTargetRegister = a3;

View File

@ -296,8 +296,7 @@ void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister};
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}

View File

@ -20,11 +20,11 @@ constexpr Register kReturnRegister2 = r8;
constexpr Register kJSFunctionRegister = rdi;
constexpr Register kContextRegister = rsi;
constexpr Register kAllocateSizeRegister = rdx;
constexpr Register kSpeculationPoisonRegister = r9;
constexpr Register kInterpreterAccumulatorRegister = rax;
constexpr Register kInterpreterBytecodeOffsetRegister = r12;
constexpr Register kInterpreterBytecodeArrayRegister = r14;
constexpr Register kInterpreterDispatchTableRegister = r15;
constexpr Register kInterpreterTargetBytecodeRegister = r11;
constexpr Register kJavaScriptCallArgCountRegister = rax;
constexpr Register kJavaScriptCallCodeStartRegister = rcx;
constexpr Register kJavaScriptCallNewTargetRegister = rdx;

View File

@ -44,6 +44,7 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
InstructionSelector selector(test_->zone(), node_count, &linkage, &sequence,
schedule, &source_position_table, nullptr,
InstructionSelector::kEnableSwitchJumpTable,
InstructionSelector::kEnableSpeculationPoison,
source_position_mode, features,
InstructionSelector::kDisableScheduling);
selector.SelectInstructions();

View File

@ -2025,6 +2025,9 @@ Matcher<Node*> IsParameter(const Matcher<int> index_matcher) {
return MakeMatcher(new IsParameterMatcher(index_matcher));
}
Matcher<Node*> IsSpeculationPoison() {
return MakeMatcher(new TestNodeMatcher(IrOpcode::kSpeculationPoison));
}
Matcher<Node*> IsLoadFramePointer() {
return MakeMatcher(new TestNodeMatcher(IrOpcode::kLoadFramePointer));

View File

@ -448,6 +448,7 @@ Matcher<Node*> IsNumberToBoolean(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsNumberToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsNumberToUint32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsParameter(const Matcher<int> index_matcher);
Matcher<Node*> IsSpeculationPoison();
Matcher<Node*> IsLoadFramePointer();
Matcher<Node*> IsLoadParentFramePointer();
Matcher<Node*> IsPlainPrimitiveToNumber(const Matcher<Node*>& input_matcher);

View File

@ -267,38 +267,25 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
return nullptr;
}
Matcher<compiler::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSpeculationPoison() {
Matcher<compiler::Node*> current =
c::IsParameter(InterpreterDispatchDescriptor::kTargetBytecode);
int bytecode_int = static_cast<int>(bytecode());
Matcher<compiler::Node*> expected = c::IsIntPtrConstant(bytecode_int);
Matcher<compiler::Node*> diff = c::IsWordOr(
bytecode_int == 0 ? current : c::IsIntPtrSub(current, expected),
c::IsIntPtrSub(expected, current));
return IsWordNot(
c::IsWordSar(diff, c::IsIntPtrConstant(kBitsPerPointer - 1)));
}
Matcher<compiler::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsPoisonTagged(
const Matcher<compiler::Node*> value_matcher) {
return IsBitcastWordToTagged(
IsWordAnd(IsSpeculationPoison(), IsBitcastTaggedToWord(value_matcher)));
return IsBitcastWordToTagged(IsWordAnd(c::IsSpeculationPoison(),
IsBitcastTaggedToWord(value_matcher)));
}
Matcher<compiler::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsPoisonWord(
const Matcher<compiler::Node*> value_matcher) {
return IsWordAnd(IsSpeculationPoison(), value_matcher);
return IsWordAnd(c::IsSpeculationPoison(), value_matcher);
}
Matcher<compiler::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsPoisonInt32(
const Matcher<compiler::Node*> value_matcher) {
Matcher<compiler::Node*> truncated_speculation_poison =
Is64() ? c::IsTruncateInt64ToInt32(IsSpeculationPoison())
: IsSpeculationPoison();
Is64() ? c::IsTruncateInt64ToInt32(c::IsSpeculationPoison())
: c::IsSpeculationPoison();
return IsWord32And(truncated_speculation_poison, value_matcher);
}

View File

@ -51,7 +51,6 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
Matcher<Node*> IsWordNot(const Matcher<Node*>& value_matcher);
Matcher<compiler::Node*> IsSpeculationPoison();
Matcher<compiler::Node*> IsPoisonTagged(
const Matcher<compiler::Node*> value_matcher);
Matcher<compiler::Node*> IsPoisonInt32(