[Compiler] Remove untrusted code mitigations.

These are no longer enabled, so remove the code mitigation logic from
the codebase.

BUG=chromium:1003890

Change-Id: I536bb1732e8463281c21da446bbba8f47ede8ebe
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3045704
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#76256}
Ross McIlroy authored 2021-08-12 12:17:00 +01:00; committed by V8 LUCI CQ
parent dacb5acd83
commit 4ab70f6b21
150 changed files with 768 additions and 3868 deletions
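
Context for the hunks below: the removed mitigation maintained a "speculation poison" mask (kSpeculationPoisonRegister) that is all-ones on the architecturally correct path and all-zeros when the CPU has speculated down a wrong path, and ANDed that mask into sensitive loads and registers. The snippet below is a minimal, stand-alone C++ sketch of that masking idea only; it is not V8 code, the names (PoisonMask, MaskedLoad) are invented for illustration, and V8 actually derived its mask from comparing the expected and actual code start addresses (see the deleted GenerateSpeculationPoisonFromCodeStartRegister hunks) rather than from a bounds check.

// speculation_poison_sketch.cc -- illustrative only, not V8 code.
#include <cstddef>
#include <cstdint>
#include <cstdio>

// All-ones when `index` is in bounds, all-zeros otherwise, computed without a
// data-dependent branch so a mispredicted bounds check still yields a zero mask.
static inline uintptr_t PoisonMask(size_t index, size_t size) {
  // index < size -> ~0 (keep the value); index >= size -> 0 (poison it).
  return ~uintptr_t{0} + static_cast<uintptr_t>(index >= size);
}

// Mask both the index and the loaded value, mirroring how the deleted code
// ANDed kSpeculationPoisonRegister into loads (e.g. EmitWordLoadPoisoningIfNeeded).
uint32_t MaskedLoad(const uint32_t* table, size_t size, size_t index) {
  const uintptr_t mask = PoisonMask(index, size);
  const size_t safe_index = index & static_cast<size_t>(mask);
  return table[safe_index] & static_cast<uint32_t>(mask);
}

int main() {
  const uint32_t table[4] = {10, 20, 30, 40};
  // In bounds prints 30; out of bounds prints 0 even if the load itself runs.
  std::printf("%u %u\n", MaskedLoad(table, 4, 2), MaskedLoad(table, 4, 9));
  return 0;
}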


@ -150,7 +150,6 @@ config_setting(
# v8_can_use_fpu_instructions
# v8_use_mips_abi_hardfloat
# v8_enable_gdbjit
# v8_untrusted_code_mitigations
# v8_enable_minor_mc
# v8_check_header_includes
# v8_enable_shared_ro_heap
@ -305,9 +304,6 @@ v8_config(
"V8_HAVE_TARGET_OS",
"V8_TARGET_OS_MACOSX",
],
}) + select({
":is_android_x86": [ "DISABLE_UNTRUSTED_CODE_MITIGATIONS" ],
"//conditions:default": [],
}) + select({
":is_v8_enable_pointer_compression": [
"V8_COMPRESS_POINTERS",


@ -228,11 +228,6 @@ declare_args() {
(is_linux || is_chromeos || is_mac)) ||
(v8_current_cpu == "ppc64" && (is_linux || is_chromeos))
# Enable mitigations for executing untrusted code.
# Disabled by default on ia32 due to conflicting requirements with embedded
# builtins.
v8_untrusted_code_mitigations = false
# Enable minor mark compact.
v8_enable_minor_mc = true
@ -469,9 +464,6 @@ if (build_with_chromium && v8_current_cpu == "arm64" &&
assert(!v8_disable_write_barriers || v8_enable_single_generation,
"Disabling write barriers works only with single generation")
assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations,
"Untrusted code mitigations are unsupported on ia32")
assert(v8_current_cpu == "arm64" || !v8_control_flow_integrity,
"Control-flow integrity is only supported on arm64")
@ -488,9 +480,6 @@ assert(!v8_enable_map_packing || !v8_enable_pointer_compression,
assert(!v8_enable_map_packing || v8_current_cpu == "x64",
"Map packing is only supported on x64")
assert(!v8_use_multi_snapshots || !v8_control_flow_integrity,
"Control-flow integrity does not support multisnapshots")
assert(!v8_enable_heap_sandbox || v8_enable_pointer_compression,
"V8 Heap Sandbox requires pointer compression")
@ -891,9 +880,6 @@ config("features") {
if (v8_enable_lazy_source_positions) {
defines += [ "V8_ENABLE_LAZY_SOURCE_POSITIONS" ]
}
if (v8_use_multi_snapshots) {
defines += [ "V8_MULTI_SNAPSHOTS" ]
}
if (v8_use_siphash) {
defines += [ "V8_USE_SIPHASH" ]
}
@ -1189,10 +1175,6 @@ config("toolchain") {
defines += [ "V8_RUNTIME_CALL_STATS" ]
}
if (!v8_untrusted_code_mitigations) {
defines += [ "DISABLE_UNTRUSTED_CODE_MITIGATIONS" ]
}
if (v8_no_inline) {
if (is_win) {
cflags += [ "/Ob0" ]
@ -1328,8 +1310,6 @@ template("asm_to_inline_asm") {
if (is_android && enable_java_templates) {
android_assets("v8_external_startup_data_assets") {
if (v8_use_external_startup_data) {
# We don't support side-by-side snapshots on Android within Chromium.
assert(!v8_use_multi_snapshots)
deps = [ "//v8" ]
renaming_sources = [ "$root_out_dir/snapshot_blob.bin" ]
if (current_cpu == "arm" || current_cpu == "x86" ||
@ -2006,17 +1986,6 @@ if (emit_builtins_as_inline_asm) {
args = []
}
}
if (v8_use_multi_snapshots) {
run_mksnapshot("trusted") {
args = [ "--no-untrusted-code-mitigations" ]
embedded_variant = "Trusted"
}
if (emit_builtins_as_inline_asm) {
asm_to_inline_asm("trusted") {
args = []
}
}
}
action("v8_dump_build_config") {
script = "tools/testrunner/utils/dump_build_config.py"
@ -2105,16 +2074,6 @@ v8_source_set("v8_snapshot") {
deps += [ ":v8_base" ]
sources += [ "src/snapshot/snapshot-external.cc" ]
if (v8_use_multi_snapshots) {
public_deps += [ ":run_mksnapshot_trusted" ]
if (emit_builtins_as_inline_asm) {
deps += [ ":asm_to_inline_asm_trusted" ]
sources += [ "$target_gen_dir/embedded_trusted.cc" ]
} else {
sources += [ "$target_gen_dir/embedded_trusted.S" ]
}
}
} else {
# Also top-level visibility targets can depend on this.
visibility += [ "//:gn_visibility" ]


@ -35,7 +35,8 @@ declare_args() {
# as an argument to profiler's method `takeHeapSnapshot`.
v8_enable_raw_heap_snapshots = false
# Enable several snapshots side-by-side (e.g. default and for trusted code).
# Deprecated flag that no longer does anything.
# TODO(rmcilroy): Remove this gn arg once it's no longer used by the bots.
v8_use_multi_snapshots = false
# Use external files for startup data blobs:
@ -99,13 +100,6 @@ if (v8_use_external_startup_data == "") {
v8_use_external_startup_data = !is_ios
}
if (v8_use_multi_snapshots) {
# Silently disable multi snapshots if they're incompatible with the current
# build configuration. This allows us to set v8_use_multi_snapshots=true on
# all bots, and e.g. no-snapshot bots will automatically do the right thing.
v8_use_multi_snapshots = v8_use_external_startup_data && !build_with_chromium
}
if (v8_enable_backtrace == "") {
v8_enable_backtrace = is_debug && !v8_optimized_debug
}


@ -2777,12 +2777,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ cmp(cp, Operand(0));
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
// Reset the masking register. This is done independent of the underlying
// feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
// with both configurations. It is safe to always do this, because the
// underlying register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
{
UseScratchRegisterScope temps(masm);


@ -3250,12 +3250,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Bind(&not_js_frame);
// Reset the masking register. This is done independent of the underlying
// feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
// with both configurations. It is safe to always do this, because the
// underlying register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
{
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
UseScratchRegisterScope temps(masm);


@ -2723,12 +2723,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&zero);
// Reset the masking register. This is done independent of the underlying
// feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
// with both configurations. It is safe to always do this, because the
// underlying register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
{
UseScratchRegisterScope temps(masm);


@ -2814,12 +2814,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&zero);
// Reset the masking register. This is done independent of the underlying
// feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
// with both configurations. It is safe to always do this, because the
// underlying register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
{
UseScratchRegisterScope temps(masm);


@ -2646,12 +2646,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ StoreU64(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
// Reset the masking register. This is done independent of the underlying
// feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
// with both configurations. It is safe to always do this, because the
// underlying register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
{
UseScratchRegisterScope temps(masm);


@ -2909,12 +2909,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&zero);
// Reset the masking register. This is done independent of the underlying
// feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
// with both configurations. It is safe to always do this, because the
// underlying register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
// Compute the handler entry address and jump to it.
UseScratchRegisterScope temp(masm);
Register scratch = temp.Acquire();


@ -2679,12 +2679,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ StoreU64(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
// Reset the masking register. This is done independent of the underlying
// feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
// with both configurations. It is safe to always do this, because the
// underlying register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
{
UseScratchRegisterScope temps(masm);


@ -158,9 +158,8 @@ Code BuildWithCodeStubAssemblerJS(Isolate* isolate, Builtin builtin,
Zone zone(isolate->allocator(), ZONE_NAME, kCompressGraphZone);
const int argc_with_recv =
(argc == kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
compiler::CodeAssemblerState state(
isolate, &zone, argc_with_recv, CodeKind::BUILTIN, name,
PoisoningMitigationLevel::kDontPoison, builtin);
compiler::CodeAssemblerState state(isolate, &zone, argc_with_recv,
CodeKind::BUILTIN, name, builtin);
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(
&state, BuiltinAssemblerOptions(isolate, builtin),
@ -183,9 +182,8 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, Builtin builtin,
CallInterfaceDescriptor descriptor(interface_descriptor);
// Ensure descriptor is already initialized.
DCHECK_LE(0, descriptor.GetRegisterParameterCount());
compiler::CodeAssemblerState state(
isolate, &zone, descriptor, CodeKind::BUILTIN, name,
PoisoningMitigationLevel::kDontPoison, builtin);
compiler::CodeAssemblerState state(isolate, &zone, descriptor,
CodeKind::BUILTIN, name, builtin);
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(
&state, BuiltinAssemblerOptions(isolate, builtin),


@ -3691,12 +3691,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
__ bind(&skip);
// Reset the masking register. This is done independent of the underlying
// feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
// with both configurations. It is safe to always do this, because the
// underlying register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
ExternalReference c_entry_fp_address = ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, masm->isolate());


@ -2660,10 +2660,6 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
sub(dst, pc, Operand(pc_offset() + Instruction::kPcLoadDelta));
}
void TurboAssembler::ResetSpeculationPoisonRegister() {
mov(kSpeculationPoisonRegister, Operand(-1));
}
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {


@ -560,8 +560,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
void ResetSpeculationPoisonRegister();
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this


@ -336,7 +336,6 @@ constexpr Register kReturnRegister2 = r2;
constexpr Register kJSFunctionRegister = r1;
constexpr Register kContextRegister = r7;
constexpr Register kAllocateSizeRegister = r1;
constexpr Register kSpeculationPoisonRegister = r9;
constexpr Register kInterpreterAccumulatorRegister = r0;
constexpr Register kInterpreterBytecodeOffsetRegister = r5;
constexpr Register kInterpreterBytecodeArrayRegister = r6;


@ -3540,10 +3540,6 @@ void TurboAssembler::ComputeCodeStartAddress(const Register& rd) {
adr(rd, -pc_offset());
}
void TurboAssembler::ResetSpeculationPoisonRegister() {
Mov(kSpeculationPoisonRegister, -1);
}
void TurboAssembler::RestoreFPAndLR() {
static_assert(StandardFrameConstants::kCallerFPOffset + kSystemPointerSize ==
StandardFrameConstants::kCallerPCOffset,


@ -1347,8 +1347,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(const Register& rd);
void ResetSpeculationPoisonRegister();
// ---------------------------------------------------------------------------
// Pointer compression Support


@ -699,8 +699,6 @@ constexpr Register kJSFunctionRegister = x1;
constexpr Register kContextRegister = cp;
constexpr Register kAllocateSizeRegister = x1;
constexpr Register kSpeculationPoisonRegister = x23;
constexpr Register kInterpreterAccumulatorRegister = x0;
constexpr Register kInterpreterBytecodeOffsetRegister = x19;
constexpr Register kInterpreterBytecodeArrayRegister = x20;


@ -2193,9 +2193,10 @@ TNode<IntPtrT> CodeStubAssembler::LoadArrayLength(
}
template <typename Array, typename TIndex, typename TValue>
TNode<TValue> CodeStubAssembler::LoadArrayElement(
TNode<Array> array, int array_header_size, TNode<TIndex> index_node,
int additional_offset, LoadSensitivity needs_poisoning) {
TNode<TValue> CodeStubAssembler::LoadArrayElement(TNode<Array> array,
int array_header_size,
TNode<TIndex> index_node,
int additional_offset) {
// TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants?
static_assert(std::is_same<TIndex, Smi>::value ||
std::is_same<TIndex, UintPtrT>::value ||
@ -2210,23 +2211,17 @@ TNode<TValue> CodeStubAssembler::LoadArrayElement(
CSA_ASSERT(this, IsOffsetInBounds(offset, LoadArrayLength(array),
array_header_size));
constexpr MachineType machine_type = MachineTypeOf<TValue>::value;
// TODO(gsps): Remove the Load case once LoadFromObject supports poisoning
if (needs_poisoning == LoadSensitivity::kSafe) {
return UncheckedCast<TValue>(LoadFromObject(machine_type, array, offset));
} else {
return UncheckedCast<TValue>(
Load(machine_type, array, offset, needs_poisoning));
}
return UncheckedCast<TValue>(LoadFromObject(machine_type, array, offset));
}
template V8_EXPORT_PRIVATE TNode<MaybeObject>
CodeStubAssembler::LoadArrayElement<TransitionArray, IntPtrT>(
TNode<TransitionArray>, int, TNode<IntPtrT>, int, LoadSensitivity);
TNode<TransitionArray>, int, TNode<IntPtrT>, int);
template <typename TIndex>
TNode<Object> CodeStubAssembler::LoadFixedArrayElement(
TNode<FixedArray> object, TNode<TIndex> index, int additional_offset,
LoadSensitivity needs_poisoning, CheckBounds check_bounds) {
CheckBounds check_bounds) {
// TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants?
static_assert(std::is_same<TIndex, Smi>::value ||
std::is_same<TIndex, UintPtrT>::value ||
@ -2238,25 +2233,22 @@ TNode<Object> CodeStubAssembler::LoadFixedArrayElement(
if (NeedsBoundsCheck(check_bounds)) {
FixedArrayBoundsCheck(object, index, additional_offset);
}
TNode<MaybeObject> element =
LoadArrayElement(object, FixedArray::kHeaderSize, index,
additional_offset, needs_poisoning);
TNode<MaybeObject> element = LoadArrayElement(object, FixedArray::kHeaderSize,
index, additional_offset);
return CAST(element);
}
template V8_EXPORT_PRIVATE TNode<Object>
CodeStubAssembler::LoadFixedArrayElement<Smi>(TNode<FixedArray>, TNode<Smi>,
int, LoadSensitivity,
CheckBounds);
int, CheckBounds);
template V8_EXPORT_PRIVATE TNode<Object>
CodeStubAssembler::LoadFixedArrayElement<UintPtrT>(TNode<FixedArray>,
TNode<UintPtrT>, int,
LoadSensitivity,
CheckBounds);
template V8_EXPORT_PRIVATE TNode<Object>
CodeStubAssembler::LoadFixedArrayElement<IntPtrT>(TNode<FixedArray>,
TNode<IntPtrT>, int,
LoadSensitivity, CheckBounds);
CheckBounds);
void CodeStubAssembler::FixedArrayBoundsCheck(TNode<FixedArrayBase> array,
TNode<Smi> index,
@ -2291,9 +2283,8 @@ void CodeStubAssembler::FixedArrayBoundsCheck(TNode<FixedArrayBase> array,
TNode<Object> CodeStubAssembler::LoadPropertyArrayElement(
TNode<PropertyArray> object, TNode<IntPtrT> index) {
int additional_offset = 0;
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe;
return CAST(LoadArrayElement(object, PropertyArray::kHeaderSize, index,
additional_offset, needs_poisoning));
additional_offset));
}
TNode<IntPtrT> CodeStubAssembler::LoadPropertyArrayLength(
@ -2648,7 +2639,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
TNode<MaybeObject> CodeStubAssembler::LoadWeakFixedArrayElement(
TNode<WeakFixedArray> object, TNode<IntPtrT> index, int additional_offset) {
return LoadArrayElement(object, WeakFixedArray::kHeaderSize, index,
additional_offset, LoadSensitivity::kSafe);
additional_offset);
}
TNode<Float64T> CodeStubAssembler::LoadFixedDoubleArrayElement(


@ -1448,40 +1448,35 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Array is any array-like type that has a fixed header followed by
// tagged elements.
template <typename Array, typename TIndex, typename TValue = MaybeObject>
TNode<TValue> LoadArrayElement(
TNode<Array> array, int array_header_size, TNode<TIndex> index,
int additional_offset = 0,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
TNode<TValue> LoadArrayElement(TNode<Array> array, int array_header_size,
TNode<TIndex> index,
int additional_offset = 0);
template <typename TIndex>
TNode<Object> LoadFixedArrayElement(
TNode<FixedArray> object, TNode<TIndex> index, int additional_offset = 0,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe,
CheckBounds check_bounds = CheckBounds::kAlways);
// This doesn't emit a bounds-check. As part of the security-performance
// tradeoff, only use it if it is performance critical.
TNode<Object> UnsafeLoadFixedArrayElement(
TNode<FixedArray> object, TNode<IntPtrT> index, int additional_offset = 0,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
TNode<Object> UnsafeLoadFixedArrayElement(TNode<FixedArray> object,
TNode<IntPtrT> index,
int additional_offset = 0) {
return LoadFixedArrayElement(object, index, additional_offset,
needs_poisoning, CheckBounds::kDebugOnly);
CheckBounds::kDebugOnly);
}
TNode<Object> LoadFixedArrayElement(
TNode<FixedArray> object, int index, int additional_offset = 0,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
TNode<Object> LoadFixedArrayElement(TNode<FixedArray> object, int index,
int additional_offset = 0) {
return LoadFixedArrayElement(object, IntPtrConstant(index),
additional_offset, needs_poisoning);
additional_offset);
}
// This doesn't emit a bounds-check. As part of the security-performance
// tradeoff, only use it if it is performance critical.
TNode<Object> UnsafeLoadFixedArrayElement(
TNode<FixedArray> object, int index, int additional_offset = 0,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
TNode<Object> UnsafeLoadFixedArrayElement(TNode<FixedArray> object, int index,
int additional_offset = 0) {
return LoadFixedArrayElement(object, IntPtrConstant(index),
additional_offset, needs_poisoning,
CheckBounds::kDebugOnly);
additional_offset, CheckBounds::kDebugOnly);
}
TNode<Object> LoadPropertyArrayElement(TNode<PropertyArray> object,


@ -2385,63 +2385,6 @@ void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
jmp(code_object, rmode);
}
void TurboAssembler::RetpolineCall(Register reg) {
ASM_CODE_COMMENT(this);
Label setup_return, setup_target, inner_indirect_branch, capture_spec;
jmp(&setup_return); // Jump past the entire retpoline below.
bind(&inner_indirect_branch);
call(&setup_target);
bind(&capture_spec);
pause();
jmp(&capture_spec);
bind(&setup_target);
mov(Operand(esp, 0), reg);
ret(0);
bind(&setup_return);
call(&inner_indirect_branch); // Callee will return after this instruction.
}
void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
ASM_CODE_COMMENT(this);
Label setup_return, setup_target, inner_indirect_branch, capture_spec;
jmp(&setup_return); // Jump past the entire retpoline below.
bind(&inner_indirect_branch);
call(&setup_target);
bind(&capture_spec);
pause();
jmp(&capture_spec);
bind(&setup_target);
mov(Operand(esp, 0), destination, rmode);
ret(0);
bind(&setup_return);
call(&inner_indirect_branch); // Callee will return after this instruction.
}
void TurboAssembler::RetpolineJump(Register reg) {
ASM_CODE_COMMENT(this);
Label setup_target, capture_spec;
call(&setup_target);
bind(&capture_spec);
pause();
jmp(&capture_spec);
bind(&setup_target);
mov(Operand(esp, 0), reg);
ret(0);
}
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met,
Label::Distance condition_met_distance) {


@ -158,15 +158,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
JumpMode jump_mode = JumpMode::kJump);
void Jump(const ExternalReference& reference);
void RetpolineCall(Register reg);
void RetpolineCall(Address destination, RelocInfo::Mode rmode);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
void LoadMap(Register destination, Register object);
void RetpolineJump(Register reg);
void Trap();
void DebugBreak();
@ -480,9 +475,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
// TODO(860429): Remove remaining poisoning infrastructure on ia32.
void ResetSpeculationPoisonRegister() { UNREACHABLE(); }
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this


@ -161,9 +161,6 @@ constexpr Register kWasmCompileLazyFuncIndexRegister = edi;
constexpr Register kRootRegister = ebx;
// TODO(860429): Remove remaining poisoning infrastructure on ia32.
constexpr Register kSpeculationPoisonRegister = no_reg;
constexpr DoubleRegister kFPReturnRegister0 = xmm1; // xmm0 isn't allocatable.
} // namespace internal


@ -5519,10 +5519,6 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
pop(ra); // Restore ra
}
void TurboAssembler::ResetSpeculationPoisonRegister() {
li(kSpeculationPoisonRegister, -1);
}
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {


@ -817,8 +817,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
void ResetSpeculationPoisonRegister();
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this


@ -362,7 +362,6 @@ constexpr Register kReturnRegister2 = a0;
constexpr Register kJSFunctionRegister = a1;
constexpr Register kContextRegister = s7;
constexpr Register kAllocateSizeRegister = a0;
constexpr Register kSpeculationPoisonRegister = t3;
constexpr Register kInterpreterAccumulatorRegister = v0;
constexpr Register kInterpreterBytecodeOffsetRegister = t4;
constexpr Register kInterpreterBytecodeArrayRegister = t5;


@ -6059,10 +6059,6 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
pop(ra); // Restore ra
}
void TurboAssembler::ResetSpeculationPoisonRegister() {
li(kSpeculationPoisonRegister, -1);
}
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {


@ -836,8 +836,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
void ResetSpeculationPoisonRegister();
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this


@ -373,7 +373,6 @@ constexpr Register kReturnRegister2 = a0;
constexpr Register kJSFunctionRegister = a1;
constexpr Register kContextRegister = s7;
constexpr Register kAllocateSizeRegister = a0;
constexpr Register kSpeculationPoisonRegister = t3;
constexpr Register kInterpreterAccumulatorRegister = v0;
constexpr Register kInterpreterBytecodeOffsetRegister = t0;
constexpr Register kInterpreterBytecodeArrayRegister = t1;


@ -63,31 +63,7 @@ OptimizedCompilationInfo::OptimizedCompilationInfo(
ConfigureFlags();
}
#ifdef DEBUG
bool OptimizedCompilationInfo::FlagSetIsValid(Flag flag) const {
switch (flag) {
case kPoisonRegisterArguments:
return untrusted_code_mitigations();
default:
return true;
}
UNREACHABLE();
}
bool OptimizedCompilationInfo::FlagGetIsValid(Flag flag) const {
switch (flag) {
case kPoisonRegisterArguments:
if (!GetFlag(kPoisonRegisterArguments)) return true;
return untrusted_code_mitigations() && called_with_code_start_register();
default:
return true;
}
UNREACHABLE();
}
#endif // DEBUG
void OptimizedCompilationInfo::ConfigureFlags() {
if (FLAG_untrusted_code_mitigations) set_untrusted_code_mitigations();
if (FLAG_turbo_inline_js_wasm_calls) set_inline_js_wasm_calls();
if (IsTurboprop() || FLAG_concurrent_inlining) {
@ -104,7 +80,6 @@ void OptimizedCompilationInfo::ConfigureFlags() {
case CodeKind::TURBOPROP:
set_called_with_code_start_register();
set_switch_jump_table();
if (FLAG_untrusted_code_mitigations) set_poison_register_arguments();
// TODO(yangguo): Disable this in case of debugging for crbug.com/826613
if (FLAG_analyze_environment_liveness) set_analyze_environment_liveness();
break;


@ -58,21 +58,19 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
V(SourcePositions, source_positions, 4) \
V(BailoutOnUninitialized, bailout_on_uninitialized, 5) \
V(LoopPeeling, loop_peeling, 6) \
V(UntrustedCodeMitigations, untrusted_code_mitigations, 7) \
V(SwitchJumpTable, switch_jump_table, 8) \
V(CalledWithCodeStartRegister, called_with_code_start_register, 9) \
V(PoisonRegisterArguments, poison_register_arguments, 10) \
V(AllocationFolding, allocation_folding, 11) \
V(AnalyzeEnvironmentLiveness, analyze_environment_liveness, 12) \
V(TraceTurboJson, trace_turbo_json, 13) \
V(TraceTurboGraph, trace_turbo_graph, 14) \
V(TraceTurboScheduled, trace_turbo_scheduled, 15) \
V(TraceTurboAllocation, trace_turbo_allocation, 16) \
V(TraceHeapBroker, trace_heap_broker, 17) \
V(WasmRuntimeExceptionSupport, wasm_runtime_exception_support, 18) \
V(ConcurrentInlining, concurrent_inlining, 19) \
V(DiscardResultForTesting, discard_result_for_testing, 20) \
V(InlineJSWasmCalls, inline_js_wasm_calls, 21)
V(SwitchJumpTable, switch_jump_table, 7) \
V(CalledWithCodeStartRegister, called_with_code_start_register, 8) \
V(AllocationFolding, allocation_folding, 9) \
V(AnalyzeEnvironmentLiveness, analyze_environment_liveness, 10) \
V(TraceTurboJson, trace_turbo_json, 11) \
V(TraceTurboGraph, trace_turbo_graph, 12) \
V(TraceTurboScheduled, trace_turbo_scheduled, 13) \
V(TraceTurboAllocation, trace_turbo_allocation, 14) \
V(TraceHeapBroker, trace_heap_broker, 15) \
V(WasmRuntimeExceptionSupport, wasm_runtime_exception_support, 16) \
V(ConcurrentInlining, concurrent_inlining, 17) \
V(DiscardResultForTesting, discard_result_for_testing, 18) \
V(InlineJSWasmCalls, inline_js_wasm_calls, 19)
enum Flag {
#define DEF_ENUM(Camel, Lower, Bit) k##Camel = 1 << Bit,
@ -82,7 +80,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
#define DEF_GETTER(Camel, Lower, Bit) \
bool Lower() const { \
DCHECK(FlagGetIsValid(k##Camel)); \
return GetFlag(k##Camel); \
}
FLAGS(DEF_GETTER)
@ -90,17 +87,11 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
#define DEF_SETTER(Camel, Lower, Bit) \
void set_##Lower() { \
DCHECK(FlagSetIsValid(k##Camel)); \
SetFlag(k##Camel); \
}
FLAGS(DEF_SETTER)
#undef DEF_SETTER
#ifdef DEBUG
bool FlagGetIsValid(Flag flag) const;
bool FlagSetIsValid(Flag flag) const;
#endif // DEBUG
// Construct a compilation info for optimized compilation.
OptimizedCompilationInfo(Zone* zone, Isolate* isolate,
Handle<SharedFunctionInfo> shared,
@ -141,13 +132,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
}
compiler::NodeObserver* node_observer() const { return node_observer_; }
void SetPoisoningMitigationLevel(PoisoningMitigationLevel poisoning_level) {
poisoning_level_ = poisoning_level;
}
PoisoningMitigationLevel GetPoisoningMitigationLevel() const {
return poisoning_level_;
}
// Code getters and setters.
void SetCode(Handle<Code> code);
@ -269,8 +253,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// Compilation flags.
unsigned flags_ = 0;
PoisoningMitigationLevel poisoning_level_ =
PoisoningMitigationLevel::kDontPoison;
const CodeKind code_kind_;
Builtin builtin_ = Builtin::kNoBuiltinId;


@ -3504,10 +3504,6 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
addi(sp, sp, Operand(2 * kSimd128Size));
}
void TurboAssembler::ResetSpeculationPoisonRegister() {
mov(kSpeculationPoisonRegister, Operand(-1));
}
void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
CmpS64(x, Operand(y), r0);
beq(dest);


@ -747,8 +747,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
void ResetSpeculationPoisonRegister();
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this


@ -349,7 +349,6 @@ constexpr Register kReturnRegister2 = r5;
constexpr Register kJSFunctionRegister = r4;
constexpr Register kContextRegister = r30;
constexpr Register kAllocateSizeRegister = r4;
constexpr Register kSpeculationPoisonRegister = r14;
constexpr Register kInterpreterAccumulatorRegister = r3;
constexpr Register kInterpreterBytecodeOffsetRegister = r15;
constexpr Register kInterpreterBytecodeArrayRegister = r16;


@ -102,42 +102,6 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultRegisterConfiguration,
GetDefaultRegisterConfiguration)
// Allocatable registers with the masking register removed.
class ArchDefaultPoisoningRegisterConfiguration : public RegisterConfiguration {
public:
ArchDefaultPoisoningRegisterConfiguration()
: RegisterConfiguration(
Register::kNumRegisters, DoubleRegister::kNumRegisters,
kMaxAllocatableGeneralRegisterCount - 1,
get_num_allocatable_double_registers(),
InitializeGeneralRegisterCodes(), get_allocatable_double_codes(),
kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE) {
}
private:
static const int* InitializeGeneralRegisterCodes() {
int filtered_index = 0;
for (int i = 0; i < kMaxAllocatableGeneralRegisterCount; ++i) {
if (kAllocatableGeneralCodes[i] != kSpeculationPoisonRegister.code()) {
allocatable_general_codes_[filtered_index] =
kAllocatableGeneralCodes[i];
filtered_index++;
}
}
DCHECK_EQ(filtered_index, kMaxAllocatableGeneralRegisterCount - 1);
return allocatable_general_codes_;
}
static int
allocatable_general_codes_[kMaxAllocatableGeneralRegisterCount - 1];
};
int ArchDefaultPoisoningRegisterConfiguration::allocatable_general_codes_
[kMaxAllocatableGeneralRegisterCount - 1];
DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultPoisoningRegisterConfiguration,
GetDefaultPoisoningRegisterConfiguration)
// RestrictedRegisterConfiguration uses the subset of allocatable general
// registers the architecture support, which results into generating assembly
// to use less registers. Currently, it's only used by RecordWrite code stub.
@ -184,10 +148,6 @@ const RegisterConfiguration* RegisterConfiguration::Default() {
return GetDefaultRegisterConfiguration();
}
const RegisterConfiguration* RegisterConfiguration::Poisoning() {
return GetDefaultPoisoningRegisterConfiguration();
}
const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters(
RegList registers) {
int num = NumRegs(registers);


@ -4743,10 +4743,6 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
pop(ra); // Restore ra
}
void TurboAssembler::ResetSpeculationPoisonRegister() {
li(kSpeculationPoisonRegister, -1);
}
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {


@ -857,8 +857,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
void ResetSpeculationPoisonRegister();
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this


@ -344,7 +344,6 @@ constexpr Register kReturnRegister2 = a2;
constexpr Register kJSFunctionRegister = a1;
constexpr Register kContextRegister = s7;
constexpr Register kAllocateSizeRegister = a1;
constexpr Register kSpeculationPoisonRegister = a7;
constexpr Register kInterpreterAccumulatorRegister = a0;
constexpr Register kInterpreterBytecodeOffsetRegister = t0;
constexpr Register kInterpreterBytecodeArrayRegister = t1;


@ -4670,10 +4670,6 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
lay(sp, MemOperand(sp, kSimd128Size));
}
void TurboAssembler::ResetSpeculationPoisonRegister() {
mov(kSpeculationPoisonRegister, Operand(-1));
}
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
larl(dst, Operand(-pc_offset() / 2));
}


@ -1015,7 +1015,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met);
void ResetSpeculationPoisonRegister();
void ComputeCodeStartAddress(Register dst);
void LoadPC(Register dst);


@ -253,7 +253,6 @@ constexpr Register kReturnRegister2 = r4;
constexpr Register kJSFunctionRegister = r3;
constexpr Register kContextRegister = r13;
constexpr Register kAllocateSizeRegister = r3;
constexpr Register kSpeculationPoisonRegister = r9;
constexpr Register kInterpreterAccumulatorRegister = r2;
constexpr Register kInterpreterBytecodeOffsetRegister = r6;
constexpr Register kInterpreterBytecodeArrayRegister = r7;


@ -1993,47 +1993,6 @@ void TurboAssembler::JumpCodeTObject(Register code, JumpMode jump_mode) {
}
}
void TurboAssembler::RetpolineCall(Register reg) {
ASM_CODE_COMMENT(this);
Label setup_return, setup_target, inner_indirect_branch, capture_spec;
jmp(&setup_return); // Jump past the entire retpoline below.
bind(&inner_indirect_branch);
call(&setup_target);
bind(&capture_spec);
pause();
jmp(&capture_spec);
bind(&setup_target);
movq(Operand(rsp, 0), reg);
ret(0);
bind(&setup_return);
call(&inner_indirect_branch); // Callee will return after this instruction.
}
void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
Move(kScratchRegister, destination, rmode);
RetpolineCall(kScratchRegister);
}
void TurboAssembler::RetpolineJump(Register reg) {
ASM_CODE_COMMENT(this);
Label setup_target, capture_spec;
call(&setup_target);
bind(&capture_spec);
pause();
jmp(&capture_spec);
bind(&setup_target);
movq(Operand(rsp, 0), reg);
ret(0);
}
void TurboAssembler::Pmaddwd(XMMRegister dst, XMMRegister src1, Operand src2) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@ -3523,11 +3482,6 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
leaq(dst, Operand(&current, -pc));
}
void TurboAssembler::ResetSpeculationPoisonRegister() {
// TODO(turbofan): Perhaps, we want to put an lfence here.
Move(kSpeculationPoisonRegister, -1);
}
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {


@ -432,17 +432,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void CallCodeTObject(Register code);
void JumpCodeTObject(Register code, JumpMode jump_mode = JumpMode::kJump);
void RetpolineCall(Register reg);
void RetpolineCall(Address destination, RelocInfo::Mode rmode);
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(const ExternalReference& reference);
void Jump(Operand op);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
Condition cc = always);
void RetpolineJump(Register reg);
void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
DeoptimizeKind kind, Label* ret,
Label* jump_deoptimization_entry_label);
@ -632,8 +627,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
void ResetSpeculationPoisonRegister();
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this


@ -212,7 +212,6 @@ constexpr Register kReturnRegister2 = r8;
constexpr Register kJSFunctionRegister = rdi;
constexpr Register kContextRegister = rsi;
constexpr Register kAllocateSizeRegister = rdx;
constexpr Register kSpeculationPoisonRegister = r11;
constexpr Register kInterpreterAccumulatorRegister = rax;
constexpr Register kInterpreterBytecodeOffsetRegister = r9;
constexpr Register kInterpreterBytecodeArrayRegister = r12;


@ -1701,20 +1701,6 @@ enum IsolateAddressId {
kIsolateAddressCount
};
enum class PoisoningMitigationLevel {
kPoisonAll,
kDontPoison,
kPoisonCriticalOnly
};
enum class LoadSensitivity {
kCritical, // Critical loads are poisoned whenever we can run untrusted
// code (i.e., when --untrusted-code-mitigations is on).
kUnsafe, // Unsafe loads are poisoned when full poisoning is on
// (--branch-load-poisoning).
kSafe // Safe loads are never poisoned.
};
// The reason for a WebAssembly trap.
#define FOREACH_WASM_TRAPREASON(V) \
V(TrapUnreachable) \


@ -82,25 +82,25 @@ FieldAccess AccessBuilder::ForJSObjectPropertiesOrHash() {
FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::AnyTagged(),
kFullWriteBarrier, LoadSensitivity::kCritical};
kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer() {
FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::TaggedPointer(),
kPointerWriteBarrier, LoadSensitivity::kCritical};
FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::TaggedPointer(),
kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectElements() {
FieldAccess access = {kTaggedBase, JSObject::kElementsOffset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Internal(), MachineType::TaggedPointer(),
kPointerWriteBarrier, LoadSensitivity::kCritical};
FieldAccess access = {kTaggedBase, JSObject::kElementsOffset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Internal(), MachineType::TaggedPointer(),
kPointerWriteBarrier};
return access;
}
@ -410,7 +410,7 @@ FieldAccess AccessBuilder::ForJSTypedArrayBasePointer() {
FieldAccess access = {kTaggedBase, JSTypedArray::kBasePointerOffset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::OtherInternal(), MachineType::AnyTagged(),
kFullWriteBarrier, LoadSensitivity::kCritical};
kFullWriteBarrier};
return access;
}
@ -424,7 +424,6 @@ FieldAccess AccessBuilder::ForJSTypedArrayExternalPointer() {
: Type::ExternalPointer(),
MachineType::Pointer(),
kNoWriteBarrier,
LoadSensitivity::kCritical,
ConstFieldInfo::None(),
false,
#ifdef V8_HEAP_SANDBOX
@ -445,7 +444,6 @@ FieldAccess AccessBuilder::ForJSDataViewDataPointer() {
: Type::ExternalPointer(),
MachineType::Pointer(),
kNoWriteBarrier,
LoadSensitivity::kUnsafe,
ConstFieldInfo::None(),
false,
#ifdef V8_HEAP_SANDBOX
@ -756,7 +754,6 @@ FieldAccess AccessBuilder::ForExternalStringResourceData() {
: Type::ExternalPointer(),
MachineType::Pointer(),
kNoWriteBarrier,
LoadSensitivity::kUnsafe,
ConstFieldInfo::None(),
false,
#ifdef V8_HEAP_SANDBOX
@ -902,10 +899,10 @@ FieldAccess AccessBuilder::ForWeakFixedArraySlot(int index) {
}
// static
FieldAccess AccessBuilder::ForCellValue() {
FieldAccess access = {kTaggedBase, Cell::kValueOffset,
Handle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::AnyTagged(),
kFullWriteBarrier, LoadSensitivity::kCritical};
FieldAccess access = {kTaggedBase, Cell::kValueOffset,
Handle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::AnyTagged(),
kFullWriteBarrier};
return access;
}
@ -966,11 +963,9 @@ ElementAccess AccessBuilder::ForSloppyArgumentsElementsMappedEntry() {
}
// statics
ElementAccess AccessBuilder::ForFixedArrayElement(
ElementsKind kind, LoadSensitivity load_sensitivity) {
ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize,
Type::Any(), MachineType::AnyTagged(),
kFullWriteBarrier, load_sensitivity};
ElementAccess AccessBuilder::ForFixedArrayElement(ElementsKind kind) {
ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
MachineType::AnyTagged(), kFullWriteBarrier};
switch (kind) {
case PACKED_SMI_ELEMENTS:
access.type = Type::SignedSmall();
@ -1038,59 +1033,50 @@ FieldAccess AccessBuilder::ForEnumCacheIndices() {
}
// static
ElementAccess AccessBuilder::ForTypedArrayElement(
ExternalArrayType type, bool is_external,
LoadSensitivity load_sensitivity) {
ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
bool is_external) {
BaseTaggedness taggedness = is_external ? kUntaggedBase : kTaggedBase;
int header_size = is_external ? 0 : ByteArray::kHeaderSize;
switch (type) {
case kExternalInt8Array: {
ElementAccess access = {taggedness, header_size,
Type::Signed32(), MachineType::Int8(),
kNoWriteBarrier, load_sensitivity};
ElementAccess access = {taggedness, header_size, Type::Signed32(),
MachineType::Int8(), kNoWriteBarrier};
return access;
}
case kExternalUint8Array:
case kExternalUint8ClampedArray: {
ElementAccess access = {taggedness, header_size,
Type::Unsigned32(), MachineType::Uint8(),
kNoWriteBarrier, load_sensitivity};
ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
MachineType::Uint8(), kNoWriteBarrier};
return access;
}
case kExternalInt16Array: {
ElementAccess access = {taggedness, header_size,
Type::Signed32(), MachineType::Int16(),
kNoWriteBarrier, load_sensitivity};
ElementAccess access = {taggedness, header_size, Type::Signed32(),
MachineType::Int16(), kNoWriteBarrier};
return access;
}
case kExternalUint16Array: {
ElementAccess access = {taggedness, header_size,
Type::Unsigned32(), MachineType::Uint16(),
kNoWriteBarrier, load_sensitivity};
ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
MachineType::Uint16(), kNoWriteBarrier};
return access;
}
case kExternalInt32Array: {
ElementAccess access = {taggedness, header_size,
Type::Signed32(), MachineType::Int32(),
kNoWriteBarrier, load_sensitivity};
ElementAccess access = {taggedness, header_size, Type::Signed32(),
MachineType::Int32(), kNoWriteBarrier};
return access;
}
case kExternalUint32Array: {
ElementAccess access = {taggedness, header_size,
Type::Unsigned32(), MachineType::Uint32(),
kNoWriteBarrier, load_sensitivity};
ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
MachineType::Uint32(), kNoWriteBarrier};
return access;
}
case kExternalFloat32Array: {
ElementAccess access = {taggedness, header_size,
Type::Number(), MachineType::Float32(),
kNoWriteBarrier, load_sensitivity};
ElementAccess access = {taggedness, header_size, Type::Number(),
MachineType::Float32(), kNoWriteBarrier};
return access;
}
case kExternalFloat64Array: {
ElementAccess access = {taggedness, header_size,
Type::Number(), MachineType::Float64(),
kNoWriteBarrier, load_sensitivity};
ElementAccess access = {taggedness, header_size, Type::Number(),
MachineType::Float64(), kNoWriteBarrier};
return access;
}
case kExternalBigInt64Array:


@ -299,9 +299,7 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to FixedArray elements.
static ElementAccess ForFixedArrayElement();
static ElementAccess ForFixedArrayElement(
ElementsKind kind,
LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe);
static ElementAccess ForFixedArrayElement(ElementsKind kind);
// Provides access to SloppyArgumentsElements elements.
static ElementAccess ForSloppyArgumentsElementsMappedEntry();
@ -319,9 +317,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForEnumCacheIndices();
// Provides access to Fixed{type}TypedArray and External{type}Array elements.
static ElementAccess ForTypedArrayElement(
ExternalArrayType type, bool is_external,
LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe);
static ElementAccess ForTypedArrayElement(ExternalArrayType type,
bool is_external);
// Provides access to HashTable fields.
static FieldAccess ForHashTableBaseNumberOfElements();


@ -36,9 +36,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
SBit OutputSBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
case kFlags_branch_and_poison:
case kFlags_deoptimize:
case kFlags_deoptimize_and_poison:
case kFlags_set:
case kFlags_trap:
case kFlags_select:
@ -322,35 +320,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode,
ArmOperandConverter const& i) {
const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
codegen->tasm()->and_(value, value, Operand(kSpeculationPoisonRegister));
}
}
void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
InstructionCode opcode,
ArmOperandConverter const& i,
Register address) {
DCHECK_EQ(kMemoryAccessPoisoned, AccessModeField::decode(opcode));
switch (AddressingModeField::decode(opcode)) {
case kMode_Offset_RI:
codegen->tasm()->mov(address, i.InputImmediate(1));
codegen->tasm()->add(address, address, i.InputRegister(0));
break;
case kMode_Offset_RR:
codegen->tasm()->add(address, i.InputRegister(0), i.InputRegister(1));
break;
default:
UNREACHABLE();
}
codegen->tasm()->and_(address, address, Operand(kSpeculationPoisonRegister));
}
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@ -691,25 +660,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne);
}
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
UseScratchRegisterScope temps(tasm());
Register scratch = temps.Acquire();
// Set a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
__ ComputeCodeStartAddress(scratch);
__ cmp(kJavaScriptCallCodeStartRegister, scratch);
__ mov(kSpeculationPoisonRegister, Operand(-1), SBit::LeaveCC, eq);
__ mov(kSpeculationPoisonRegister, Operand(0), SBit::LeaveCC, ne);
__ csdb();
}
void CodeGenerator::AssembleRegisterArgumentPoisoning() {
__ and_(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
__ and_(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
__ and_(sp, sp, kSpeculationPoisonRegister);
}
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@ -1619,12 +1569,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmLdrb:
__ ldrb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmLdrsb:
__ ldrsb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmStrb:
__ strb(i.InputRegister(0), i.InputOffset(1));
@ -1632,11 +1580,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArmLdrh:
__ ldrh(i.OutputRegister(), i.InputOffset());
EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmLdrsh:
__ ldrsh(i.OutputRegister(), i.InputOffset());
EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmStrh:
__ strh(i.InputRegister(0), i.InputOffset(1));
@ -1644,22 +1590,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArmLdr:
__ ldr(i.OutputRegister(), i.InputOffset());
EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmStr:
__ str(i.InputRegister(0), i.InputOffset(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVldrF32: {
const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
UseScratchRegisterScope temps(tasm());
Register address = temps.Acquire();
ComputePoisonedAddressForLoad(this, opcode, i, address);
__ vldr(i.OutputFloatRegister(), address, 0);
} else {
__ vldr(i.OutputFloatRegister(), i.InputOffset());
}
__ vldr(i.OutputFloatRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
@ -1688,15 +1625,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVldrF64: {
const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
UseScratchRegisterScope temps(tasm());
Register address = temps.Acquire();
ComputePoisonedAddressForLoad(this, opcode, i, address);
__ vldr(i.OutputDoubleRegister(), address, 0);
} else {
__ vldr(i.OutputDoubleRegister(), i.InputOffset());
}
__ vldr(i.OutputDoubleRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
@ -1832,10 +1761,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ isb(SY);
break;
}
case kArchWordPoisonOnSpeculation:
__ and_(i.OutputRegister(0), i.InputRegister(0),
Operand(kSpeculationPoisonRegister));
break;
case kArmVmullLow: {
auto dt = static_cast<NeonDataType>(MiscField::decode(instr->opcode()));
__ vmull(dt, i.OutputSimd128Register(), i.InputSimd128Register(0).low(),
@ -3597,20 +3522,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
Instruction* instr) {
// TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
return;
}
condition = NegateFlagsCondition(condition);
__ eor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
Operand(kSpeculationPoisonRegister), SBit::LeaveCC,
FlagsConditionToCondition(condition));
__ csdb();
}
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@ -3805,7 +3716,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();


@ -630,17 +630,11 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
}
InstructionOperand output = g.DefineAsRegister(node);
EmitLoad(this, opcode, &output, base, index);
}
void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();


@ -460,47 +460,6 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
}
#endif // V8_ENABLE_WEBASSEMBLY
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
Arm64OperandConverter const& i) {
const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
Register poison = value.Is64Bits() ? kSpeculationPoisonRegister
: kSpeculationPoisonRegister.W();
codegen->tasm()->And(value, value, Operand(poison));
}
}
void EmitMaybePoisonedFPLoad(CodeGenerator* codegen, InstructionCode opcode,
Arm64OperandConverter* i, VRegister output_reg) {
const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
AddressingMode address_mode = AddressingModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned && address_mode != kMode_Root) {
UseScratchRegisterScope temps(codegen->tasm());
Register address = temps.AcquireX();
switch (address_mode) {
case kMode_MRI: // Fall through.
case kMode_MRR:
codegen->tasm()->Add(address, i->InputRegister(0), i->InputOperand(1));
break;
case kMode_Operand2_R_LSL_I:
codegen->tasm()->Add(address, i->InputRegister(0),
i->InputOperand2_64(1));
break;
default:
// Note: we don't need poisoning for kMode_Root loads as those loads
// target a fixed offset from root register which is set once when
// initializing the vm.
UNREACHABLE();
}
codegen->tasm()->And(address, address, Operand(kSpeculationPoisonRegister));
codegen->tasm()->Ldr(output_reg, MemOperand(address));
} else {
codegen->tasm()->Ldr(output_reg, i->MemoryOperand());
}
}
// Handles unary ops that work for float (scalar), double (scalar), or NEON.
template <typename Fn>
void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
@ -714,29 +673,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Bind(&not_deoptimized);
}
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
UseScratchRegisterScope temps(tasm());
Register scratch = temps.AcquireX();
// Set a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
__ ComputeCodeStartAddress(scratch);
__ Cmp(kJavaScriptCallCodeStartRegister, scratch);
__ Csetm(kSpeculationPoisonRegister, eq);
__ Csdb();
}
void CodeGenerator::AssembleRegisterArgumentPoisoning() {
UseScratchRegisterScope temps(tasm());
Register scratch = temps.AcquireX();
__ Mov(scratch, sp);
__ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
__ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
__ And(scratch, scratch, kSpeculationPoisonRegister);
__ Mov(sp, scratch);
}
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@ -1822,12 +1758,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Ldrb:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrb(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Ldrsb:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrsb(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Strb:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@ -1836,12 +1770,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Ldrh:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrh(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Ldrsh:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrsh(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Strh:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@ -1850,12 +1782,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Ldrsw:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrsw(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrW:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputRegister32(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64StrW:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@ -1864,19 +1794,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Ldr:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrDecompressTaggedSigned:
__ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrDecompressTaggedPointer:
__ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrDecompressAnyTagged:
__ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Str:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@ -1887,7 +1813,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArm64LdrS:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister().S());
__ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
break;
case kArm64StrS:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@ -1895,7 +1821,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArm64LdrD:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister());
__ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
break;
case kArm64StrD:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@ -1916,10 +1842,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Dsb(FullSystem, BarrierAll);
__ Isb();
break;
case kArchWordPoisonOnSpeculation:
__ And(i.OutputRegister(0), i.InputRegister(0),
Operand(kSpeculationPoisonRegister));
break;
case kWord32AtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb, Register32);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
@ -2907,7 +2829,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
ArchOpcode opcode = instr->arch_opcode();
if (opcode == kArm64CompareAndBranch32) {
DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Cbz(i.InputRegister32(0), tlabel);
@ -2919,7 +2840,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64CompareAndBranch) {
DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Cbz(i.InputRegister64(0), tlabel);
@ -2931,7 +2851,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64TestAndBranch32) {
DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel);
@ -2943,7 +2862,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64TestAndBranch) {
DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel);
@ -2961,19 +2879,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ B(flabel); // no fallthru to flabel.
}
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
Instruction* instr) {
// TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
return;
}
condition = NegateFlagsCondition(condition);
__ CmovX(kSpeculationPoisonRegister, xzr,
FlagsConditionToCondition(condition));
__ Csdb();
}
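
The helper removed here ran at the entry of a successor block (see TryInsertBranchPoisoning in code-generator.cc further down): it negated the recorded condition and conditionally moved xzr into the poison register, followed by a Csdb barrier. A value-level sketch of the effect, with an invented function name:

#include <cstdint>

// A block guarded by a branch condition re-checks the flags on entry: if the
// condition did not actually hold, the entry is speculative and the mask is
// cleared; otherwise it is left intact.
uint64_t PoisonAtBlockEntry(uint64_t poison, bool guarding_condition_held) {
  return guarding_condition_held ? poison : uint64_t{0};
}
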
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@ -3143,7 +3048,6 @@ void CodeGenerator::AssembleConstructFrame() {
// arguments count was pushed.
required_slots -=
unoptimized_frame_slots - TurboAssembler::kExtraSlotClaimedByPrologue;
ResetSpeculationPoison();
}
#if V8_ENABLE_WEBASSEMBLY

View File

@ -837,10 +837,6 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
}
if (node->opcode() == IrOpcode::kProtectedLoad) {
opcode |= AccessModeField::encode(kMemoryAccessProtected);
}
@ -848,8 +844,6 @@ void InstructionSelector::VisitLoad(Node* node) {
EmitLoad(this, node, opcode, immediate_mode, rep);
}
void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitStore(Node* node) {
@ -2314,9 +2308,6 @@ template <int N>
bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node,
typename CbzOrTbzMatchTrait<N>::IntegralType value,
Node* user, FlagsCondition cond, FlagsContinuation* cont) {
// Branch poisoning requires flags to be set, so when it's enabled for
// a particular branch, we shouldn't be applying the cbz/tbz optimization.
DCHECK(!cont->IsPoisoned());
// Only handle branches and deoptimisations.
if (!cont->IsBranch() && !cont->IsDeoptimize()) return false;
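
The comment and DCHECK dropped above were the only poisoning-specific constraint in this helper: CBZ and TBZ branch on a register value or a single bit without setting NZCV, so a flags-consuming poisoning continuation could not follow them. With poisoning gone, the only remaining precondition is that the continuation is a branch or deoptimization. For reference, the two source patterns the helper targets, written as plain C++ (illustrative, not selector code; bit is assumed to be below the word width):

#include <cstdint>

bool BranchIfZero(uint64_t x) {  // selected as CBZ x, <label>
  return x == 0;
}

bool BranchIfBitClear(uint64_t x, unsigned bit) {  // selected as TBZ x, #bit, <label>
  return (x & (uint64_t{1} << bit)) == 0;
}
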
@ -2404,7 +2395,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
std::swap(left, right);
}
if (opcode == kArm64Cmp && !cont->IsPoisoned()) {
if (opcode == kArm64Cmp) {
Int64Matcher m(right);
if (m.HasResolvedValue()) {
if (TryEmitCbzOrTbz<64>(selector, left, m.ResolvedValue(), node,
@ -2422,19 +2413,17 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Int32BinopMatcher m(node);
FlagsCondition cond = cont->condition();
if (!cont->IsPoisoned()) {
if (m.right().HasResolvedValue()) {
if (TryEmitCbzOrTbz<32>(selector, m.left().node(),
m.right().ResolvedValue(), node, cond, cont)) {
return;
}
} else if (m.left().HasResolvedValue()) {
FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
if (TryEmitCbzOrTbz<32>(selector, m.right().node(),
m.left().ResolvedValue(), node, commuted_cond,
cont)) {
return;
}
if (m.right().HasResolvedValue()) {
if (TryEmitCbzOrTbz<32>(selector, m.left().node(),
m.right().ResolvedValue(), node, cond, cont)) {
return;
}
} else if (m.left().HasResolvedValue()) {
FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
if (TryEmitCbzOrTbz<32>(selector, m.right().node(),
m.left().ResolvedValue(), node, commuted_cond,
cont)) {
return;
}
}
ArchOpcode opcode = kArm64Cmp32;
@ -2523,8 +2512,7 @@ struct TestAndBranchMatcher {
Matcher matcher_;
void Initialize() {
if (cont_->IsBranch() && !cont_->IsPoisoned() &&
matcher_.right().HasResolvedValue() &&
if (cont_->IsBranch() && matcher_.right().HasResolvedValue() &&
base::bits::IsPowerOfTwo(matcher_.right().ResolvedValue())) {
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont_->condition() == kEqual) ||
@ -2832,7 +2820,7 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
}
// Branch could not be combined with a compare, compare against 0 and branch.
if (!cont->IsPoisoned() && cont->IsBranch()) {
if (cont->IsBranch()) {
Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
g.UseRegister(value), g.Label(cont->true_block()),
g.Label(cont->false_block()));

View File

@ -41,14 +41,16 @@ class CodeGenerator::JumpTable final : public ZoneObject {
size_t const target_count_;
};
CodeGenerator::CodeGenerator(
Zone* codegen_zone, Frame* frame, Linkage* linkage,
InstructionSequence* instructions, OptimizedCompilationInfo* info,
Isolate* isolate, base::Optional<OsrHelper> osr_helper,
int start_source_position, JumpOptimizationInfo* jump_opt,
PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
Builtin builtin, size_t max_unoptimized_frame_height,
size_t max_pushed_argument_count, const char* debug_name)
CodeGenerator::CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
InstructionSequence* instructions,
OptimizedCompilationInfo* info, Isolate* isolate,
base::Optional<OsrHelper> osr_helper,
int start_source_position,
JumpOptimizationInfo* jump_opt,
const AssemblerOptions& options, Builtin builtin,
size_t max_unoptimized_frame_height,
size_t max_pushed_argument_count,
const char* debug_name)
: zone_(codegen_zone),
isolate_(isolate),
frame_access_state_(nullptr),
@ -80,7 +82,6 @@ CodeGenerator::CodeGenerator(
codegen_zone, SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS),
protected_instructions_(codegen_zone),
result_(kSuccess),
poisoning_level_(poisoning_level),
block_starts_(codegen_zone),
instr_starts_(codegen_zone),
debug_name_(debug_name) {
@ -284,9 +285,6 @@ void CodeGenerator::AssembleCode() {
BailoutIfDeoptimized();
}
offsets_info_.init_poison = tasm()->pc_offset();
InitializeSpeculationPoison();
// Define deoptimization literals for all inlined functions.
DCHECK_EQ(0u, deoptimization_literals_.size());
for (OptimizedCompilationInfo::InlinedFunctionHolder& inlined :
@ -355,8 +353,6 @@ void CodeGenerator::AssembleCode() {
tasm()->bind(GetLabel(current_block_));
TryInsertBranchPoisoning(block);
if (block->must_construct_frame()) {
AssembleConstructFrame();
// We need to setup the root register after we assemble the prologue, to
@ -494,37 +490,6 @@ void CodeGenerator::AssembleCode() {
result_ = kSuccess;
}
void CodeGenerator::TryInsertBranchPoisoning(const InstructionBlock* block) {
// See if our predecessor was a basic block terminated by a branch_and_poison
// instruction. If yes, then perform the masking based on the flags.
if (block->PredecessorCount() != 1) return;
RpoNumber pred_rpo = (block->predecessors())[0];
const InstructionBlock* pred = instructions()->InstructionBlockAt(pred_rpo);
if (pred->code_start() == pred->code_end()) return;
Instruction* instr = instructions()->InstructionAt(pred->code_end() - 1);
FlagsMode mode = FlagsModeField::decode(instr->opcode());
switch (mode) {
case kFlags_branch_and_poison: {
BranchInfo branch;
RpoNumber target = ComputeBranchInfo(&branch, instr);
if (!target.IsValid()) {
// Non-trivial branch, add the masking code.
FlagsCondition condition = branch.condition;
if (branch.false_label == GetLabel(block->rpo_number())) {
condition = NegateFlagsCondition(condition);
}
AssembleBranchPoisoning(condition, instr);
}
break;
}
case kFlags_deoptimize_and_poison: {
UNREACHABLE();
}
default:
break;
}
}
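
Context for the removal above, sketched rather than quoted: the mask update was only inserted at the start of a block whose single predecessor ended in a branch_and_poison instruction, and the branch condition was negated when that block is the branch's false target. The enum and helpers below are invented for illustration.

// Which condition the mask update had to check, depending on which edge of
// the poisoning branch leads into the current block.
enum class Cond { kEqual, kNotEqual };

Cond Negate(Cond c) { return c == Cond::kEqual ? Cond::kNotEqual : Cond::kEqual; }

Cond ConditionForMaskUpdate(Cond branch_condition, bool entered_via_false_edge) {
  // On the false edge the branch condition did not hold, so the check that
  // keeps the mask intact is the negated condition.
  return entered_via_false_edge ? Negate(branch_condition) : branch_condition;
}
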
void CodeGenerator::AssembleArchBinarySearchSwitchRange(
Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
std::pair<int32_t, Label*>* end) {
@ -839,8 +804,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
switch (mode) {
case kFlags_branch:
case kFlags_branch_and_poison: {
case kFlags_branch: {
BranchInfo branch;
RpoNumber target = ComputeBranchInfo(&branch, instr);
if (target.IsValid()) {
@ -854,8 +818,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
AssembleArchBranch(instr, &branch);
break;
}
case kFlags_deoptimize:
case kFlags_deoptimize_and_poison: {
case kFlags_deoptimize: {
// Assemble a conditional eager deoptimization after this instruction.
InstructionOperandConverter i(this, instr);
size_t frame_state_offset =
@ -864,17 +827,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
DeoptImmedArgsCountField::decode(instr->opcode());
DeoptimizationExit* const exit = AddDeoptimizationExit(
instr, frame_state_offset, immediate_args_count);
Label continue_label;
BranchInfo branch;
branch.condition = condition;
branch.true_label = exit->label();
branch.false_label = &continue_label;
branch.false_label = exit->continue_label();
branch.fallthru = true;
AssembleArchDeoptBranch(instr, &branch);
tasm()->bind(&continue_label);
if (mode == kFlags_deoptimize_and_poison) {
AssembleBranchPoisoning(NegateFlagsCondition(branch.condition), instr);
}
tasm()->bind(exit->continue_label());
break;
}
@ -900,11 +858,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
}
}
// TODO(jarin) We should thread the flag through rather than set it.
if (instr->IsCall()) {
ResetSpeculationPoison();
}
return kSuccess;
}
@ -1087,9 +1040,9 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
if (needs_frame_state) {
MarkLazyDeoptSite();
// If the frame state is present, it starts at argument 2 - after
// the code address and the poison-alias index.
size_t frame_state_offset = 2;
// If the frame state is present, it starts at argument 1 - after
// the code address.
size_t frame_state_offset = 1;
FrameStateDescriptor* descriptor =
GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
int pc_offset = tasm()->pc_offset_for_safepoint();
@ -1428,29 +1381,6 @@ DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
OutputFrameStateCombine::Ignore());
}
void CodeGenerator::InitializeSpeculationPoison() {
if (poisoning_level_ == PoisoningMitigationLevel::kDontPoison) return;
// Initialize {kSpeculationPoisonRegister} either by comparing the expected
// with the actual call target, or by unconditionally using {-1} initially.
// Masking register arguments with it only makes sense in the first case.
if (info()->called_with_code_start_register()) {
tasm()->RecordComment("-- Prologue: generate speculation poison --");
GenerateSpeculationPoisonFromCodeStartRegister();
if (info()->poison_register_arguments()) {
AssembleRegisterArgumentPoisoning();
}
} else {
ResetSpeculationPoison();
}
}
void CodeGenerator::ResetSpeculationPoison() {
if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
tasm()->ResetSpeculationPoisonRegister();
}
}
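
The prologue decision deleted here, restated as a hedged sketch (function and parameter names are invented): when the caller passes the code start in a register, the mask is derived from the comparison; otherwise the poison register is simply reset to -1, which is also what happened after every call and at OSR entry points.

#include <cstdint>

constexpr uint64_t kAllOnes = ~uint64_t{0};

uint64_t InitialPoison(bool called_with_code_start_register,
                       uint64_t actual_code_start,
                       uint64_t expected_code_start) {
  if (called_with_code_start_register) {
    // Derive the mask from the code-start comparison, as in the
    // architecture-specific GenerateSpeculationPoisonFromCodeStartRegister().
    return uint64_t{0} -
           static_cast<uint64_t>(actual_code_start == expected_code_start);
  }
  // Otherwise the poison register was unconditionally set to all ones.
  return kAllOnes;
}
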
OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
: frame_(gen->frame()), tasm_(gen->tasm()), next_(gen->ools_) {
gen->ools_ = this;

View File

@ -103,7 +103,6 @@ class DeoptimizationLiteral {
struct TurbolizerCodeOffsetsInfo {
int code_start_register_check = -1;
int deopt_check = -1;
int init_poison = -1;
int blocks_start = -1;
int out_of_line_code = -1;
int deoptimization_exits = -1;
@ -120,14 +119,16 @@ struct TurbolizerInstructionStartInfo {
// Generates native code for a sequence of instructions.
class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
public:
explicit CodeGenerator(
Zone* codegen_zone, Frame* frame, Linkage* linkage,
InstructionSequence* instructions, OptimizedCompilationInfo* info,
Isolate* isolate, base::Optional<OsrHelper> osr_helper,
int start_source_position, JumpOptimizationInfo* jump_opt,
PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
Builtin builtin, size_t max_unoptimized_frame_height,
size_t max_pushed_argument_count, const char* debug_name = nullptr);
explicit CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
InstructionSequence* instructions,
OptimizedCompilationInfo* info, Isolate* isolate,
base::Optional<OsrHelper> osr_helper,
int start_source_position,
JumpOptimizationInfo* jump_opt,
const AssemblerOptions& options, Builtin builtin,
size_t max_unoptimized_frame_height,
size_t max_pushed_argument_count,
const char* debug_name = nullptr);
// Generate native code. After calling AssembleCode, call FinalizeCode to
// produce the actual code object. If an error occurs during either phase,
@ -216,17 +217,6 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
// Assemble instructions for the specified block.
CodeGenResult AssembleBlock(const InstructionBlock* block);
// Inserts mask update at the beginning of an instruction block if the
// predecessor blocks ends with a masking branch.
void TryInsertBranchPoisoning(const InstructionBlock* block);
// Initializes the masking register in the prologue of a function.
void InitializeSpeculationPoison();
// Reset the masking register during execution of a function.
void ResetSpeculationPoison();
// Generates a mask from the pc passed in {kJavaScriptCallCodeStartRegister}.
void GenerateSpeculationPoisonFromCodeStartRegister();
// Assemble code for the specified instruction.
CodeGenResult AssembleInstruction(int instruction_index,
const InstructionBlock* block);
@ -276,18 +266,12 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
// contains the expected pointer to the start of the instruction stream.
void AssembleCodeStartRegisterCheck();
void AssembleBranchPoisoning(FlagsCondition condition, Instruction* instr);
// When entering a code that is marked for deoptimization, rather continuing
// with its execution, we jump to a lazy compiled code. We need to do this
// because this code has already been deoptimized and needs to be unlinked
// from the JS functions referring it.
void BailoutIfDeoptimized();
// Generates code to poison the stack pointer and implicit register arguments
// like the context register and the function register.
void AssembleRegisterArgumentPoisoning();
// Generates an architecture-specific, descriptor-specific prologue
// to set up a stack frame.
void AssembleConstructFrame();
@ -484,7 +468,6 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
SourcePositionTableBuilder source_position_table_builder_;
ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions_;
CodeGenResult result_;
PoisoningMitigationLevel poisoning_level_;
ZoneVector<int> block_starts_;
TurbolizerCodeOffsetsInfo offsets_info_;
ZoneVector<TurbolizerInstructionStartInfo> instr_starts_;

View File

@ -684,16 +684,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ bind(&skip);
}
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
// TODO(860429): Remove remaining poisoning infrastructure on ia32.
UNREACHABLE();
}
void CodeGenerator::AssembleRegisterArgumentPoisoning() {
// TODO(860429): Remove remaining poisoning infrastructure on ia32.
UNREACHABLE();
}
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@ -712,11 +702,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineCall(reg);
} else {
__ call(reg);
}
__ call(reg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@ -738,19 +724,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
__ wasm_call(wasm_code, constant.rmode());
} else {
if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineCall(wasm_code, constant.rmode());
} else {
__ call(wasm_code, constant.rmode());
}
__ call(wasm_code, constant.rmode());
}
} else {
Register reg = i.InputRegister(0);
if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineCall(reg);
} else {
__ call(reg);
}
__ call(i.InputRegister(0));
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@ -762,12 +739,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Address wasm_code = static_cast<Address>(constant.ToInt32());
__ jmp(wasm_code, constant.rmode());
} else {
Register reg = i.InputRegister(0);
if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
__ jmp(reg);
}
__ jmp(i.InputRegister(0));
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@ -784,11 +756,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
__ jmp(reg);
}
__ jmp(reg);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@ -800,11 +768,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
__ jmp(reg);
}
__ jmp(reg);
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
break;
@ -1278,9 +1242,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32Bswap:
__ bswap(i.OutputRegister());
break;
case kArchWordPoisonOnSpeculation:
// TODO(860429): Remove remaining poisoning infrastructure on ia32.
UNREACHABLE();
case kIA32MFence:
__ mfence();
break;
@ -4171,12 +4132,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ jmp(flabel);
}
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
Instruction* instr) {
// TODO(860429): Remove remaining poisoning infrastructure on ia32.
UNREACHABLE();
}
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);

View File

@ -564,15 +564,9 @@ void InstructionSelector::VisitLoad(Node* node) {
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
code |= AccessModeField::encode(kMemoryAccessPoisoned);
}
Emit(code, 1, outputs, input_count, inputs);
}
void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();

View File

@ -100,7 +100,6 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
V(ArchTruncateDoubleToI) \
V(ArchStoreWithWriteBarrier) \
V(ArchStackSlot) \
V(ArchWordPoisonOnSpeculation) \
V(ArchStackPointerGreaterThan) \
V(ArchStackCheckOffset) \
V(Word32AtomicLoadInt8) \
@ -208,12 +207,10 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
enum FlagsMode {
kFlags_none = 0,
kFlags_branch = 1,
kFlags_branch_and_poison = 2,
kFlags_deoptimize = 3,
kFlags_deoptimize_and_poison = 4,
kFlags_set = 5,
kFlags_trap = 6,
kFlags_select = 7,
kFlags_deoptimize = 2,
kFlags_set = 3,
kFlags_trap = 4,
kFlags_select = 5,
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
@ -262,7 +259,6 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
enum MemoryAccessMode {
kMemoryAccessDirect = 0,
kMemoryAccessProtected = 1,
kMemoryAccessPoisoned = 2
};
// The InstructionCode is an opaque, target-specific integer that encodes

View File

@ -132,7 +132,6 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
// We should not have branches in the middle of a block.
DCHECK_NE(instr->flags_mode(), kFlags_branch);
DCHECK_NE(instr->flags_mode(), kFlags_branch_and_poison);
if (IsFixedRegisterParameter(instr)) {
if (last_live_in_reg_marker_ != nullptr) {
@ -298,11 +297,6 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
// effects.
return kIsLoadOperation;
case kArchWordPoisonOnSpeculation:
// While poisoning operations have no side effect, they must not be
// reordered relative to branches.
return kHasSideEffect;
case kArchPrepareCallCFunction:
case kArchPrepareTailCall:
case kArchTailCallCodeObject:

View File

@ -39,7 +39,7 @@ InstructionSelector::InstructionSelector(
size_t* max_pushed_argument_count, SourcePositionMode source_position_mode,
Features features, EnableScheduling enable_scheduling,
EnableRootsRelativeAddressing enable_roots_relative_addressing,
PoisoningMitigationLevel poisoning_level, EnableTraceTurboJson trace_turbo)
EnableTraceTurboJson trace_turbo)
: zone_(zone),
linkage_(linkage),
sequence_(sequence),
@ -63,7 +63,6 @@ InstructionSelector::InstructionSelector(
enable_roots_relative_addressing_(enable_roots_relative_addressing),
enable_switch_jump_table_(enable_switch_jump_table),
state_values_cache_(zone),
poisoning_level_(poisoning_level),
frame_(frame),
instruction_selection_failed_(false),
instr_origins_(sequence->zone()),
@ -1076,17 +1075,10 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
}
DCHECK_EQ(1u, buffer->instruction_args.size());
// Argument 1 is used for the poison-alias index (encoded in a word-sized
// immediate). This is the index of the operand that aliases with the poison
// register, or -1 if there is no aliasing.
buffer->instruction_args.push_back(g.TempImmediate(-1));
const size_t poison_alias_index = 1;
DCHECK_EQ(buffer->instruction_args.size() - 1, poison_alias_index);
// If the call needs a frame state, we insert the state information as
// follows (n is the number of value inputs to the frame state):
// arg 2 : deoptimization id.
// arg 3 - arg (n + 2) : value inputs to the frame state.
// arg 1 : deoptimization id.
// arg 2 - arg (n + 2) : value inputs to the frame state.
size_t frame_state_entries = 0;
USE(frame_state_entries); // frame_state_entries is only used for debug.
if (buffer->frame_state_descriptor != nullptr) {
@ -1123,7 +1115,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
&buffer->instruction_args, FrameStateInputKind::kStackSlot,
instruction_zone());
DCHECK_EQ(2 + frame_state_entries, buffer->instruction_args.size());
DCHECK_EQ(1 + frame_state_entries, buffer->instruction_args.size());
}
size_t input_count = static_cast<size_t>(buffer->input_count());
@ -1159,23 +1151,11 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
buffer->pushed_nodes[stack_index] = param;
pushed_count++;
} else {
// If we do load poisoning and the linkage uses the poisoning register,
// then we request the input in memory location, and during code
// generation, we move the input to the register.
if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison &&
unallocated.HasFixedRegisterPolicy()) {
int reg = unallocated.fixed_register_index();
if (Register::from_code(reg) == kSpeculationPoisonRegister) {
buffer->instruction_args[poison_alias_index] = g.TempImmediate(
static_cast<int32_t>(buffer->instruction_args.size()));
op = g.UseRegisterOrSlotOrConstant(*iter);
}
}
buffer->instruction_args.push_back(op);
}
}
DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count -
frame_state_entries - 1);
frame_state_entries);
if (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK && is_tail_call &&
stack_param_delta != 0) {
// For tail calls that change the size of their parameter list and keep
@ -1509,11 +1489,6 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsRepresentation(MachineRepresentation::kSimd128, node);
return VisitLoadLane(node);
}
case IrOpcode::kPoisonedLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
return VisitPoisonedLoad(node);
}
case IrOpcode::kStore:
return VisitStore(node);
case IrOpcode::kProtectedStore:
@ -1850,12 +1825,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node);
case IrOpcode::kFloat64InsertHighWord32:
return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
case IrOpcode::kTaggedPoisonOnSpeculation:
return MarkAsTagged(node), VisitTaggedPoisonOnSpeculation(node);
case IrOpcode::kWord32PoisonOnSpeculation:
return MarkAsWord32(node), VisitWord32PoisonOnSpeculation(node);
case IrOpcode::kWord64PoisonOnSpeculation:
return MarkAsWord64(node), VisitWord64PoisonOnSpeculation(node);
case IrOpcode::kStackSlot:
return VisitStackSlot(node);
case IrOpcode::kStackPointerGreaterThan:
@ -2389,30 +2358,6 @@ void InstructionSelector::VisitNode(Node* node) {
}
}
void InstructionSelector::EmitWordPoisonOnSpeculation(Node* node) {
if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
OperandGenerator g(this);
Node* input_node = NodeProperties::GetValueInput(node, 0);
InstructionOperand input = g.UseRegister(input_node);
InstructionOperand output = g.DefineSameAsFirst(node);
Emit(kArchWordPoisonOnSpeculation, output, input);
} else {
EmitIdentity(node);
}
}
void InstructionSelector::VisitWord32PoisonOnSpeculation(Node* node) {
EmitWordPoisonOnSpeculation(node);
}
void InstructionSelector::VisitWord64PoisonOnSpeculation(Node* node) {
EmitWordPoisonOnSpeculation(node);
}
void InstructionSelector::VisitTaggedPoisonOnSpeculation(Node* node) {
EmitWordPoisonOnSpeculation(node);
}
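
For context, the selector hook removed above lowered the three *PoisonOnSpeculation operators to a single architecture-level AND with the poison register when poisoning was enabled, and to an identity move otherwise. A value-level sketch (the enum is local to the sketch, not V8's declaration):

#include <cstdint>

enum class PoisoningLevel { kDontPoison, kPoisonCriticalOnly, kPoisonAll };

uint64_t PoisonOnSpeculation(uint64_t value, uint64_t poison_mask,
                             PoisoningLevel level) {
  if (level == PoisoningLevel::kDontPoison) return value;  // EmitIdentity path
  return value & poison_mask;  // kArchWordPoisonOnSpeculation path
}
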
void InstructionSelector::VisitStackPointerGreaterThan(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kStackPointerGreaterThanCondition, node);
@ -3104,45 +3049,24 @@ void InstructionSelector::VisitReturn(Node* ret) {
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
BasicBlock* fbranch) {
if (NeedsPoisoning(IsSafetyCheckOf(branch->op()))) {
FlagsContinuation cont =
FlagsContinuation::ForBranchAndPoison(kNotEqual, tbranch, fbranch);
VisitWordCompareZero(branch, branch->InputAt(0), &cont);
} else {
FlagsContinuation cont =
FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch);
VisitWordCompareZero(branch, branch->InputAt(0), &cont);
}
FlagsContinuation cont =
FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch);
VisitWordCompareZero(branch, branch->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
if (NeedsPoisoning(p.is_safety_check())) {
FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(),
node->InputAt(1));
VisitWordCompareZero(node, node->InputAt(0), &cont);
} else {
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(),
node->InputAt(1));
VisitWordCompareZero(node, node->InputAt(0), &cont);
}
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(),
node->InputAt(1));
VisitWordCompareZero(node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
if (NeedsPoisoning(p.is_safety_check())) {
FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
kEqual, p.kind(), p.reason(), node->id(), p.feedback(),
node->InputAt(1));
VisitWordCompareZero(node, node->InputAt(0), &cont);
} else {
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kEqual, p.kind(), p.reason(), node->id(), p.feedback(),
node->InputAt(1));
VisitWordCompareZero(node, node->InputAt(0), &cont);
}
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kEqual, p.kind(), p.reason(), node->id(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitSelect(Node* node) {
@ -3186,17 +3110,10 @@ void InstructionSelector::VisitDynamicCheckMapsWithDeoptUnless(Node* node) {
g.UseImmediate(n.slot()), g.UseImmediate(n.handler())});
}
if (NeedsPoisoning(IsSafetyCheck::kCriticalSafetyCheck)) {
FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(),
dynamic_check_args.data(), static_cast<int>(dynamic_check_args.size()));
VisitWordCompareZero(node, n.condition(), &cont);
} else {
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(),
dynamic_check_args.data(), static_cast<int>(dynamic_check_args.size()));
VisitWordCompareZero(node, n.condition(), &cont);
}
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(),
dynamic_check_args.data(), static_cast<int>(dynamic_check_args.size()));
VisitWordCompareZero(node, n.condition(), &cont);
}
void InstructionSelector::VisitTrapIf(Node* node, TrapId trap_id) {
@ -3409,18 +3326,6 @@ void InstructionSelector::SwapShuffleInputs(Node* node) {
}
#endif // V8_ENABLE_WEBASSEMBLY
// static
bool InstructionSelector::NeedsPoisoning(IsSafetyCheck safety_check) const {
switch (poisoning_level_) {
case PoisoningMitigationLevel::kDontPoison:
return false;
case PoisoningMitigationLevel::kPoisonAll:
return safety_check != IsSafetyCheck::kNoSafetyCheck;
case PoisoningMitigationLevel::kPoisonCriticalOnly:
return safety_check == IsSafetyCheck::kCriticalSafetyCheck;
}
UNREACHABLE();
}
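
The switch deleted above was the whole poisoning policy; as a quick reference, the same mapping in table form (the enums and their orderings below are local to the sketch, not V8's declarations):

enum SafetyCheck { kNoSafetyCheck, kSafetyCheck, kCriticalSafetyCheck };
enum MitigationLevel { kDontPoison, kPoisonCriticalOnly, kPoisonAll };

constexpr bool kNeedsPoisoning[3][3] = {
    //                        kNoSafetyCheck  kSafetyCheck  kCriticalSafetyCheck
    /* kDontPoison          */ {false,          false,         false},
    /* kPoisonCriticalOnly  */ {false,          false,         true},
    /* kPoisonAll           */ {false,          true,          true},
};

static_assert(kNeedsPoisoning[kPoisonAll][kSafetyCheck],
              "kPoisonAll poisons every explicit safety check");
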
} // namespace compiler
} // namespace internal
} // namespace v8

View File

@ -54,13 +54,6 @@ class FlagsContinuation final {
return FlagsContinuation(kFlags_branch, condition, true_block, false_block);
}
static FlagsContinuation ForBranchAndPoison(FlagsCondition condition,
BasicBlock* true_block,
BasicBlock* false_block) {
return FlagsContinuation(kFlags_branch_and_poison, condition, true_block,
false_block);
}
// Creates a new flags continuation for an eager deoptimization exit.
static FlagsContinuation ForDeoptimize(
FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
@ -71,16 +64,6 @@ class FlagsContinuation final {
extra_args_count);
}
// Creates a new flags continuation for an eager deoptimization exit.
static FlagsContinuation ForDeoptimizeAndPoison(
FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
NodeId node_id, FeedbackSource const& feedback, Node* frame_state,
InstructionOperand* extra_args = nullptr, int extra_args_count = 0) {
return FlagsContinuation(kFlags_deoptimize_and_poison, condition, kind,
reason, node_id, feedback, frame_state, extra_args,
extra_args_count);
}
// Creates a new flags continuation for a boolean value.
static FlagsContinuation ForSet(FlagsCondition condition, Node* result) {
return FlagsContinuation(condition, result);
@ -98,16 +81,8 @@ class FlagsContinuation final {
}
bool IsNone() const { return mode_ == kFlags_none; }
bool IsBranch() const {
return mode_ == kFlags_branch || mode_ == kFlags_branch_and_poison;
}
bool IsDeoptimize() const {
return mode_ == kFlags_deoptimize || mode_ == kFlags_deoptimize_and_poison;
}
bool IsPoisoned() const {
return mode_ == kFlags_branch_and_poison ||
mode_ == kFlags_deoptimize_and_poison;
}
bool IsBranch() const { return mode_ == kFlags_branch; }
bool IsDeoptimize() const { return mode_ == kFlags_deoptimize; }
bool IsSet() const { return mode_ == kFlags_set; }
bool IsTrap() const { return mode_ == kFlags_trap; }
bool IsSelect() const { return mode_ == kFlags_select; }
@ -226,7 +201,7 @@ class FlagsContinuation final {
condition_(condition),
true_block_(true_block),
false_block_(false_block) {
DCHECK(mode == kFlags_branch || mode == kFlags_branch_and_poison);
DCHECK(mode == kFlags_branch);
DCHECK_NOT_NULL(true_block);
DCHECK_NOT_NULL(false_block);
}
@ -245,7 +220,7 @@ class FlagsContinuation final {
frame_state_or_result_(frame_state),
extra_args_(extra_args),
extra_args_count_(extra_args_count) {
DCHECK(mode == kFlags_deoptimize || mode == kFlags_deoptimize_and_poison);
DCHECK(mode == kFlags_deoptimize);
DCHECK_NOT_NULL(frame_state);
}
@ -338,8 +313,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
: kDisableScheduling,
EnableRootsRelativeAddressing enable_roots_relative_addressing =
kDisableRootsRelativeAddressing,
PoisoningMitigationLevel poisoning_level =
PoisoningMitigationLevel::kDontPoison,
EnableTraceTurboJson trace_turbo = kDisableTraceTurboJson);
// Visit code for the entire graph with the included schedule.
@ -443,8 +416,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();
bool NeedsPoisoning(IsSafetyCheck safety_check) const;
// ===========================================================================
// ============ Architecture-independent graph covering methods. =============
// ===========================================================================
@ -681,8 +652,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont);
void EmitWordPoisonOnSpeculation(Node* node);
void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
const CallDescriptor* call_descriptor, Node* node);
void EmitPrepareResults(ZoneVector<compiler::PushParameter>* results,
@ -797,7 +766,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
FrameStateInput::Equal>
state_values_cache_;
PoisoningMitigationLevel poisoning_level_;
Frame* frame_;
bool instruction_selection_failed_;
ZoneVector<std::pair<int, int>> instr_origins_;

View File

@ -410,12 +410,8 @@ std::ostream& operator<<(std::ostream& os, const FlagsMode& fm) {
return os;
case kFlags_branch:
return os << "branch";
case kFlags_branch_and_poison:
return os << "branch_and_poison";
case kFlags_deoptimize:
return os << "deoptimize";
case kFlags_deoptimize_and_poison:
return os << "deoptimize_and_poison";
case kFlags_set:
return os << "set";
case kFlags_trap:

View File

@ -935,8 +935,7 @@ class V8_EXPORT_PRIVATE Instruction final {
bool IsDeoptimizeCall() const {
return arch_opcode() == ArchOpcode::kArchDeoptimize ||
FlagsModeField::decode(opcode()) == kFlags_deoptimize ||
FlagsModeField::decode(opcode()) == kFlags_deoptimize_and_poison;
FlagsModeField::decode(opcode()) == kFlags_deoptimize;
}
bool IsTrap() const {

View File

@ -55,17 +55,6 @@ struct JumpThreadingState {
RpoNumber onstack() { return RpoNumber::FromInt(-2); }
};
bool IsBlockWithBranchPoisoning(InstructionSequence* code,
InstructionBlock* block) {
if (block->PredecessorCount() != 1) return false;
RpoNumber pred_rpo = (block->predecessors())[0];
const InstructionBlock* pred = code->InstructionBlockAt(pred_rpo);
if (pred->code_start() == pred->code_end()) return false;
Instruction* instr = code->InstructionAt(pred->code_end() - 1);
FlagsMode mode = FlagsModeField::decode(instr->opcode());
return mode == kFlags_branch_and_poison;
}
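
The predicate removed above pinned any block whose single predecessor ended in a poisoning branch, because forwarding such a block past its label would skip the mask update that has to run on that edge; with only plain kFlags_branch left, every block goes through the ordinary scan. A condensed, illustrative classification of that scan (see the forwarding loop below; this is not the real API):

enum class Forward { kKeepScanning, kStopHere, kForwardToJumpTarget };

Forward ClassifyForForwarding(bool predecessor_ends_in_poisoning_branch,
                              bool moves_redundant, bool has_flags, bool is_nop,
                              bool is_jump) {
  if (predecessor_ends_in_poisoning_branch) return Forward::kStopHere;  // removed case
  if (!moves_redundant || has_flags) return Forward::kStopHere;
  if (is_nop) return Forward::kKeepScanning;
  if (is_jump) return Forward::kForwardToJumpTarget;
  return Forward::kStopHere;  // returns and everything else end the scan
}
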
} // namespace
bool JumpThreading::ComputeForwarding(Zone* local_zone,
@ -92,85 +81,80 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone,
TRACE("jt [%d] B%d\n", static_cast<int>(stack.size()),
block->rpo_number().ToInt());
RpoNumber fw = block->rpo_number();
if (!IsBlockWithBranchPoisoning(code, block)) {
bool fallthru = true;
for (int i = block->code_start(); i < block->code_end(); ++i) {
Instruction* instr = code->InstructionAt(i);
if (!instr->AreMovesRedundant()) {
// can't skip instructions with non redundant moves.
TRACE(" parallel move\n");
fallthru = false;
} else if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
// can't skip instructions with flags continuations.
TRACE(" flags\n");
fallthru = false;
} else if (instr->IsNop()) {
// skip nops.
TRACE(" nop\n");
continue;
} else if (instr->arch_opcode() == kArchJmp) {
// try to forward the jump instruction.
TRACE(" jmp\n");
// if this block deconstructs the frame, we can't forward it.
// TODO(mtrofin): we can still forward if we end up building
// the frame at start. So we should move the decision of whether
// to build a frame or not in the register allocator, and trickle it
// here and to the code generator.
if (frame_at_start || !(block->must_deconstruct_frame() ||
block->must_construct_frame())) {
fw = code->InputRpo(instr, 0);
}
fallthru = false;
} else if (instr->IsRet()) {
TRACE(" ret\n");
if (fallthru) {
CHECK_IMPLIES(block->must_construct_frame(),
block->must_deconstruct_frame());
// Only handle returns with immediate/constant operands, since
// they must always be the same for all returns in a function.
// Dynamic return values might use different registers at
// different return sites and therefore cannot be shared.
if (instr->InputAt(0)->IsImmediate()) {
int32_t return_size = ImmediateOperand::cast(instr->InputAt(0))
->inline_int32_value();
// Instructions can be shared only for blocks that share
// the same |must_deconstruct_frame| attribute.
if (block->must_deconstruct_frame()) {
if (empty_deconstruct_frame_return_block ==
RpoNumber::Invalid()) {
empty_deconstruct_frame_return_block = block->rpo_number();
empty_deconstruct_frame_return_size = return_size;
} else if (empty_deconstruct_frame_return_size ==
return_size) {
fw = empty_deconstruct_frame_return_block;
block->clear_must_deconstruct_frame();
}
} else {
if (empty_no_deconstruct_frame_return_block ==
RpoNumber::Invalid()) {
empty_no_deconstruct_frame_return_block =
block->rpo_number();
empty_no_deconstruct_frame_return_size = return_size;
} else if (empty_no_deconstruct_frame_return_size ==
return_size) {
fw = empty_no_deconstruct_frame_return_block;
}
bool fallthru = true;
for (int i = block->code_start(); i < block->code_end(); ++i) {
Instruction* instr = code->InstructionAt(i);
if (!instr->AreMovesRedundant()) {
// can't skip instructions with non redundant moves.
TRACE(" parallel move\n");
fallthru = false;
} else if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
// can't skip instructions with flags continuations.
TRACE(" flags\n");
fallthru = false;
} else if (instr->IsNop()) {
// skip nops.
TRACE(" nop\n");
continue;
} else if (instr->arch_opcode() == kArchJmp) {
// try to forward the jump instruction.
TRACE(" jmp\n");
// if this block deconstructs the frame, we can't forward it.
// TODO(mtrofin): we can still forward if we end up building
// the frame at start. So we should move the decision of whether
// to build a frame or not in the register allocator, and trickle it
// here and to the code generator.
if (frame_at_start || !(block->must_deconstruct_frame() ||
block->must_construct_frame())) {
fw = code->InputRpo(instr, 0);
}
fallthru = false;
} else if (instr->IsRet()) {
TRACE(" ret\n");
if (fallthru) {
CHECK_IMPLIES(block->must_construct_frame(),
block->must_deconstruct_frame());
// Only handle returns with immediate/constant operands, since
// they must always be the same for all returns in a function.
// Dynamic return values might use different registers at
// different return sites and therefore cannot be shared.
if (instr->InputAt(0)->IsImmediate()) {
int32_t return_size = ImmediateOperand::cast(instr->InputAt(0))
->inline_int32_value();
// Instructions can be shared only for blocks that share
// the same |must_deconstruct_frame| attribute.
if (block->must_deconstruct_frame()) {
if (empty_deconstruct_frame_return_block ==
RpoNumber::Invalid()) {
empty_deconstruct_frame_return_block = block->rpo_number();
empty_deconstruct_frame_return_size = return_size;
} else if (empty_deconstruct_frame_return_size == return_size) {
fw = empty_deconstruct_frame_return_block;
block->clear_must_deconstruct_frame();
}
} else {
if (empty_no_deconstruct_frame_return_block ==
RpoNumber::Invalid()) {
empty_no_deconstruct_frame_return_block = block->rpo_number();
empty_no_deconstruct_frame_return_size = return_size;
} else if (empty_no_deconstruct_frame_return_size ==
return_size) {
fw = empty_no_deconstruct_frame_return_block;
}
}
}
fallthru = false;
} else {
// can't skip other instructions.
TRACE(" other\n");
fallthru = false;
}
break;
}
if (fallthru) {
int next = 1 + block->rpo_number().ToInt();
if (next < code->InstructionBlockCount())
fw = RpoNumber::FromInt(next);
fallthru = false;
} else {
// can't skip other instructions.
TRACE(" other\n");
fallthru = false;
}
break;
}
if (fallthru) {
int next = 1 + block->rpo_number().ToInt();
if (next < code->InstructionBlockCount()) fw = RpoNumber::FromInt(next);
}
state.Forward(fw);
}
@ -225,7 +209,7 @@ void JumpThreading::ApplyForwarding(Zone* local_zone,
for (int i = block->code_start(); i < block->code_end(); ++i) {
Instruction* instr = code->InstructionAt(i);
FlagsMode mode = FlagsModeField::decode(instr->opcode());
if (mode == kFlags_branch || mode == kFlags_branch_and_poison) {
if (mode == kFlags_branch) {
fallthru = false; // branches don't fall through to the next block.
} else if (instr->arch_opcode() == kArchJmp ||
instr->arch_opcode() == kArchRet) {

View File

@ -313,16 +313,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
<< "\""; \
UNIMPLEMENTED();
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
MipsOperandConverter const& i) {
const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
}
}
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@ -614,31 +604,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
// Calculate a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
// difference = (current - expected) | (expected - current)
// poison = ~(difference >> (kBitsPerSystemPointer - 1))
__ ComputeCodeStartAddress(kScratchReg);
__ Move(kSpeculationPoisonRegister, kScratchReg);
__ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
__ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
kScratchReg);
__ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
__ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kBitsPerSystemPointer - 1);
__ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kSpeculationPoisonRegister);
}
void CodeGenerator::AssembleRegisterArgumentPoisoning() {
__ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
__ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
__ And(sp, sp, kSpeculationPoisonRegister);
}
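
The MIPS variant built the mask arithmetically instead of with a conditional select: difference = (current - expected) | (expected - current), poison = ~(difference >> (kBitsPerSystemPointer - 1)). A standalone check of that identity, assuming 64-bit words (MIPS32 uses the same trick at 32 bits); the sra/nor pair is rewritten as a logical shift plus a subtraction so the C++ stays well defined:

#include <cassert>
#include <cstdint>

uint64_t ArithmeticPoisonMask(uint64_t current, uint64_t expected) {
  // For current != expected, at least one of (a - b) and (b - a) has its sign
  // bit set in two's complement, so the OR always has its top bit set.
  uint64_t difference = (current - expected) | (expected - current);
  uint64_t top_bit = difference >> 63;  // 1 iff current != expected
  return top_bit - uint64_t{1};         // 0 if they differ, all ones if equal
}

int main() {
  assert(ArithmeticPoisonMask(0x1234, 0x1234) == ~uint64_t{0});
  assert(ArithmeticPoisonMask(0x1234, 0x1235) == uint64_t{0});
  assert(ArithmeticPoisonMask(0, ~uint64_t{0}) == uint64_t{0});
  return 0;
}
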
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@ -938,10 +903,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
case kArchWordPoisonOnSpeculation:
__ And(i.OutputRegister(), i.InputRegister(0),
kSpeculationPoisonRegister);
break;
case kIeee754Float64Acos:
ASSEMBLE_IEEE754_UNOP(acos);
break;
@ -1541,30 +1502,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMipsLbu:
__ lbu(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsLb:
__ lb(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsSb:
__ sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMipsLhu:
__ lhu(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsUlhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsLh:
__ lh(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsUlh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsSh:
__ sh(i.InputOrZeroRegister(2), i.MemoryOperand());
@ -1574,11 +1529,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMipsLw:
__ lw(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsUlw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsSw:
__ sw(i.InputOrZeroRegister(2), i.MemoryOperand());
@ -3727,85 +3680,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
branch->fallthru);
}
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
Instruction* instr) {
// TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
return;
}
MipsOperandConverter i(this, instr);
condition = NegateFlagsCondition(condition);
switch (instr->arch_opcode()) {
case kMipsCmp: {
__ LoadZeroOnCondition(kSpeculationPoisonRegister, i.InputRegister(0),
i.InputOperand(1),
FlagsConditionToConditionCmp(condition));
}
return;
case kMipsTst: {
switch (condition) {
case kEqual:
__ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
break;
case kNotEqual:
__ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
kScratchReg);
break;
default:
UNREACHABLE();
}
}
return;
case kMipsAddOvf:
case kMipsSubOvf: {
// Overflow occurs if overflow register is negative
__ Slt(kScratchReg2, kScratchReg, zero_reg);
switch (condition) {
case kOverflow:
__ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
kScratchReg2);
break;
case kNotOverflow:
__ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
break;
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
}
}
return;
case kMipsMulOvf: {
// Overflow occurs if overflow register is not zero
switch (condition) {
case kOverflow:
__ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
kScratchReg);
break;
case kNotOverflow:
__ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
break;
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
}
}
return;
case kMipsCmpS:
case kMipsCmpD: {
bool predicate;
FlagsConditionToConditionCmpFPU(&predicate, condition);
if (predicate) {
__ LoadZeroIfFPUCondition(kSpeculationPoisonRegister);
} else {
__ LoadZeroIfNotFPUCondition(kSpeculationPoisonRegister);
}
}
return;
default:
UNREACHABLE();
}
}
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@ -4130,7 +4004,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();

View File

@ -1444,8 +1444,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
AdduLatency(false) + AndLatency(false) + BranchShortLatency() + 1 +
SubuLatency() + AdduLatency();
}
case kArchWordPoisonOnSpeculation:
return AndLatency();
case kIeee754Float64Acos:
case kIeee754Float64Acosh:
case kIeee754Float64Asin:

View File

@ -375,10 +375,6 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
}
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
@ -393,8 +389,6 @@ void InstructionSelector::VisitLoad(Node* node) {
}
}
void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();

View File

@ -321,16 +321,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
UNREACHABLE();
}
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
MipsOperandConverter const& i) {
const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
}
}
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@ -577,31 +567,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
// Calculate a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
// difference = (current - expected) | (expected - current)
// poison = ~(difference >> (kBitsPerSystemPointer - 1))
__ ComputeCodeStartAddress(kScratchReg);
__ Move(kSpeculationPoisonRegister, kScratchReg);
__ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
__ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
kScratchReg);
__ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
__ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kBitsPerSystemPointer - 1);
__ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kSpeculationPoisonRegister);
}
void CodeGenerator::AssembleRegisterArgumentPoisoning() {
__ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
__ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
__ And(sp, sp, kSpeculationPoisonRegister);
}
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@ -900,10 +865,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
case kArchWordPoisonOnSpeculation:
__ And(i.OutputRegister(), i.InputRegister(0),
kSpeculationPoisonRegister);
break;
case kIeee754Float64Acos:
ASSEMBLE_IEEE754_UNOP(acos);
break;
@ -1646,30 +1607,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64Lbu:
__ Lbu(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Lb:
__ Lb(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Sb:
__ Sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Lhu:
__ Lhu(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Lh:
__ Lh(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Sh:
__ Sh(i.InputOrZeroRegister(2), i.MemoryOperand());
@ -1679,27 +1634,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64Lw:
__ Lw(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Lwu:
__ Lwu(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulwu:
__ Ulwu(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ld:
__ Ld(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Uld:
__ Uld(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Sw:
__ Sw(i.InputOrZeroRegister(2), i.MemoryOperand());
@ -3904,104 +3853,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
branch->fallthru);
}
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
Instruction* instr) {
// TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
return;
}
MipsOperandConverter i(this, instr);
condition = NegateFlagsCondition(condition);
switch (instr->arch_opcode()) {
case kMips64Cmp: {
__ LoadZeroOnCondition(kSpeculationPoisonRegister, i.InputRegister(0),
i.InputOperand(1),
FlagsConditionToConditionCmp(condition));
}
return;
case kMips64Tst: {
switch (condition) {
case kEqual:
__ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
break;
case kNotEqual:
__ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
kScratchReg);
break;
default:
UNREACHABLE();
}
}
return;
case kMips64Dadd:
case kMips64Dsub: {
// Check for overflow creates 1 or 0 for result.
__ dsrl32(kScratchReg, i.OutputRegister(), 31);
__ srl(kScratchReg2, i.OutputRegister(), 31);
__ xor_(kScratchReg2, kScratchReg, kScratchReg2);
switch (condition) {
case kOverflow:
__ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
kScratchReg2);
break;
case kNotOverflow:
__ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
break;
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
}
}
return;
case kMips64DaddOvf:
case kMips64DsubOvf: {
// Overflow occurs if overflow register is negative
__ Slt(kScratchReg2, kScratchReg, zero_reg);
switch (condition) {
case kOverflow:
__ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
kScratchReg2);
break;
case kNotOverflow:
__ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
break;
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
}
}
return;
case kMips64MulOvf: {
// Overflow occurs if overflow register is not zero
switch (condition) {
case kOverflow:
__ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
kScratchReg);
break;
case kNotOverflow:
__ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
break;
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
}
}
return;
case kMips64CmpS:
case kMips64CmpD: {
bool predicate;
FlagsConditionToConditionCmpFPU(&predicate, condition);
if (predicate) {
__ LoadZeroIfFPUCondition(kSpeculationPoisonRegister);
} else {
__ LoadZeroIfNotFPUCondition(kSpeculationPoisonRegister);
}
}
return;
default:
UNREACHABLE();
}
}
#undef UNSUPPORTED_COND
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
@ -4340,7 +4191,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
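
The EmitWordLoadPoisoningIfNeeded helpers removed above all implement the same masking rule: a value loaded under a possibly mis-speculated branch is ANDed with kSpeculationPoisonRegister, which holds all ones on the architecturally correct path and all zeros once mis-speculation has been detected. A minimal standalone sketch of that invariant, using hypothetical names rather than V8's macro assembler:

#include <cstdint>
#include <cstdio>

// All ones while execution is on the architecturally correct path,
// all zeros while the CPU is speculating down a mispredicted branch.
uint64_t speculation_poison = ~uint64_t{0};

uint64_t PoisonedLoad(const uint64_t* slot) {
  // Masking turns a speculatively loaded value into 0, so it cannot be
  // used as an index that leaks through a cache side channel.
  return *slot & speculation_poison;
}

int main() {
  uint64_t secret = 0x1234;
  std::printf("%llx\n", (unsigned long long)PoisonedLoad(&secret));  // 1234
  speculation_poison = 0;  // what a mis-speculated path would observe
  std::printf("%llx\n", (unsigned long long)PoisonedLoad(&secret));  // 0
}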


@ -1352,8 +1352,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return DadduLatency(false) + AndLatency(false) + AssertLatency() +
DadduLatency(false) + AndLatency(false) + BranchShortLatency() +
1 + DsubuLatency() + DadduLatency();
case kArchWordPoisonOnSpeculation:
return AndLatency();
case kIeee754Float64Acos:
case kIeee754Float64Acosh:
case kIeee754Float64Asin:


@ -515,16 +515,10 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
}
EmitLoad(this, node, opcode);
}
void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@ -2041,8 +2035,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
bool IsNodeUnsigned(Node* n) {
NodeMatcher m(n);
if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() ||
m.IsProtectedLoad() || m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
if (m.IsLoad() || m.IsUnalignedLoad() || m.IsProtectedLoad() ||
m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
LoadRepresentation load_rep = LoadRepresentationOf(n->op());
return load_rep.IsUnsigned();
} else {


@ -38,9 +38,7 @@ class PPCOperandConverter final : public InstructionOperandConverter {
RCBit OutputRCBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
case kFlags_branch_and_poison:
case kFlags_deoptimize:
case kFlags_deoptimize_and_poison:
case kFlags_set:
case kFlags_trap:
case kFlags_select:
@ -289,15 +287,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
UNREACHABLE();
}
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
PPCOperandConverter const& i) {
const MemoryAccessMode access_mode = AccessModeField::decode(instr->opcode());
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
codegen->tasm()->and_(value, value, kSpeculationPoisonRegister);
}
}
} // namespace
#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr, round) \
@ -777,25 +766,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne, cr0);
}
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
Register scratch = kScratchReg;
__ ComputeCodeStartAddress(scratch);
// Calculate a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
__ CmpS64(kJavaScriptCallCodeStartRegister, scratch);
__ li(scratch, Operand::Zero());
__ notx(kSpeculationPoisonRegister, scratch);
__ isel(eq, kSpeculationPoisonRegister, kSpeculationPoisonRegister, scratch);
}
void CodeGenerator::AssembleRegisterArgumentPoisoning() {
__ and_(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
__ and_(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
__ and_(sp, sp, kSpeculationPoisonRegister);
}
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@ -1164,10 +1134,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand(offset.offset()), r0);
break;
}
case kArchWordPoisonOnSpeculation:
__ and_(i.OutputRegister(), i.InputRegister(0),
kSpeculationPoisonRegister);
break;
case kPPC_Peek: {
int reverse_slot = i.InputInt32(0);
int offset =
@ -1969,33 +1935,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
case kPPC_LoadWordU8:
ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordS8:
ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
__ extsb(i.OutputRegister(), i.OutputRegister());
EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordU16:
ASSEMBLE_LOAD_INTEGER(lhz, lhzx);
EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordS16:
ASSEMBLE_LOAD_INTEGER(lha, lhax);
EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordU32:
ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordS32:
ASSEMBLE_LOAD_INTEGER(lwa, lwax);
EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
#if V8_TARGET_ARCH_PPC64
case kPPC_LoadWord64:
ASSEMBLE_LOAD_INTEGER(ld, ldx);
EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
#endif
case kPPC_LoadFloat32:
@ -2144,7 +2103,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kPPC_LoadByteRev32: {
ASSEMBLE_LOAD_INTEGER_RR(lwbrx);
EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
}
case kPPC_StoreByteRev32: {
@ -2170,7 +2128,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kPPC_LoadByteRev64: {
ASSEMBLE_LOAD_INTEGER_RR(ldbrx);
EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
}
case kPPC_StoreByteRev64: {
@ -3801,21 +3758,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
Instruction* instr) {
// TODO(John) Handle float comparisons (kUnordered[Not]Equal).
if (condition == kUnorderedEqual || condition == kUnorderedNotEqual ||
condition == kOverflow || condition == kNotOverflow) {
return;
}
ArchOpcode op = instr->arch_opcode();
condition = NegateFlagsCondition(condition);
__ li(kScratchReg, Operand::Zero());
__ isel(FlagsConditionToCondition(condition, op), kSpeculationPoisonRegister,
kScratchReg, kSpeculationPoisonRegister, cr0);
}
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@ -4081,7 +4023,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
ResetSpeculationPoison();
}
const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
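
AssembleBranchPoisoning, deleted here as in the other backends, kept that mask up to date at branches: the flags condition is negated and the poison register is conditionally cleared (with isel on PPC), so the mask drops to zero on any edge that was only reached through misprediction. A hedged C++ sketch of the effect, not the real conditional-select sequence:

#include <cstdint>

// On the taken edge of a branch, |condition_holds| is what the comparison
// actually produced. If it is false, this edge was reached only through
// misprediction, so the poison mask is cleared for everything downstream.
uint64_t UpdatePoisonOnBranch(uint64_t poison, bool condition_holds) {
  return condition_holds ? poison : uint64_t{0};
}

int main() {
  return UpdatePoisonOnBranch(~uint64_t{0}, false) == 0 ? 0 : 1;
}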


@ -229,11 +229,6 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
}
if (node->opcode() == IrOpcode::kPoisonedLoad &&
poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
}
bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicLoad ||
node->opcode() == IrOpcode::kWord64AtomicLoad);
@ -252,8 +247,6 @@ void InstructionSelector::VisitLoad(Node* node) {
}
}
void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();


@ -307,17 +307,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
UNREACHABLE();
}
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
RiscvOperandConverter const& i) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
}
}
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@ -570,31 +559,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
// Calculate a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
// difference = (current - expected) | (expected - current)
// poison = ~(difference >> (kBitsPerSystemPointer - 1))
__ ComputeCodeStartAddress(kScratchReg);
__ Move(kSpeculationPoisonRegister, kScratchReg);
__ Sub32(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
__ Sub32(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
kScratchReg);
__ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
__ Sra64(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kBitsPerSystemPointer - 1);
__ Nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kSpeculationPoisonRegister);
}
void CodeGenerator::AssembleRegisterArgumentPoisoning() {
__ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
__ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
__ And(sp, sp, kSpeculationPoisonRegister);
}
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@ -887,10 +851,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchWordPoisonOnSpeculation:
__ And(i.OutputRegister(), i.InputRegister(0),
kSpeculationPoisonRegister);
break;
case kIeee754Float64Acos:
ASSEMBLE_IEEE754_UNOP(acos);
break;
@ -1553,30 +1513,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kRiscvLbu:
__ Lbu(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvLb:
__ Lb(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvSb:
__ Sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kRiscvLhu:
__ Lhu(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUlhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvLh:
__ Lh(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUlh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvSh:
__ Sh(i.InputOrZeroRegister(2), i.MemoryOperand());
@ -1586,27 +1540,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kRiscvLw:
__ Lw(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUlw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvLwu:
__ Lwu(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUlwu:
__ Ulwu(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvLd:
__ Ld(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUld:
__ Uld(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvSw:
__ Sw(i.InputOrZeroRegister(2), i.MemoryOperand());
@ -2011,110 +1959,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
branch->fallthru);
}
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
Instruction* instr) {
// TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
return;
}
RiscvOperandConverter i(this, instr);
condition = NegateFlagsCondition(condition);
switch (instr->arch_opcode()) {
case kRiscvCmp: {
__ CompareI(kScratchReg, i.InputRegister(0), i.InputOperand(1),
FlagsConditionToConditionCmp(condition));
__ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg);
}
return;
case kRiscvCmpZero: {
__ CompareI(kScratchReg, i.InputRegister(0), Operand(zero_reg),
FlagsConditionToConditionCmp(condition));
__ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg);
}
return;
case kRiscvTst: {
switch (condition) {
case kEqual:
__ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
break;
case kNotEqual:
__ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
kScratchReg);
break;
default:
UNREACHABLE();
}
}
return;
case kRiscvAdd64:
case kRiscvSub64: {
// Check for overflow creates 1 or 0 for result.
__ Srl64(kScratchReg, i.OutputRegister(), 63);
__ Srl32(kScratchReg2, i.OutputRegister(), 31);
__ Xor(kScratchReg2, kScratchReg, kScratchReg2);
switch (condition) {
case kOverflow:
__ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
kScratchReg2);
break;
case kNotOverflow:
__ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
break;
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
}
}
return;
case kRiscvAddOvf64:
case kRiscvSubOvf64: {
// Overflow occurs if overflow register is negative
__ Slt(kScratchReg2, kScratchReg, zero_reg);
switch (condition) {
case kOverflow:
__ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
kScratchReg2);
break;
case kNotOverflow:
__ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
break;
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
}
}
return;
case kRiscvMulOvf32: {
// Overflow occurs if overflow register is not zero
switch (condition) {
case kOverflow:
__ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
kScratchReg);
break;
case kNotOverflow:
__ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
break;
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
}
}
return;
case kRiscvCmpS:
case kRiscvCmpD: {
bool predicate;
FlagsConditionToConditionCmpFPU(&predicate, condition);
if (predicate) {
__ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg);
} else {
__ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
}
}
return;
default:
UNREACHABLE();
}
}
#undef UNSUPPORTED_COND
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
@ -2489,7 +2333,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
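
The comments in the removed RISC-V GenerateSpeculationPoisonFromCodeStartRegister spell out the branchless mask derivation: difference = (current - expected) | (expected - current), then poison = ~(difference >> (kBitsPerSystemPointer - 1)). A self-contained check of that arithmetic, written with an unsigned formulation equivalent to the arithmetic shift plus NOT:

#include <cassert>
#include <cstdint>

// All ones when the incoming code-start register matches the expected code
// start, all zeros otherwise (i.e. when speculating with the wrong PC).
uint64_t PoisonFromCodeStart(uint64_t current, uint64_t expected) {
  uint64_t difference = (current - expected) | (expected - current);
  // difference has its top bit set exactly when current != expected, so this
  // equals ~(arithmetic_shift_right(difference, 63)).
  return (difference >> 63) - 1;
}

int main() {
  assert(PoisonFromCodeStart(0x1000, 0x1000) == ~uint64_t{0});
  assert(PoisonFromCodeStart(0x1000, 0x2000) == 0);
  return 0;
}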


@ -1169,8 +1169,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return Add64Latency(false) + AndLatency(false) + AssertLatency() +
Add64Latency(false) + AndLatency(false) + BranchShortLatency() +
1 + Sub64Latency() + Add64Latency();
case kArchWordPoisonOnSpeculation:
return AndLatency();
case kIeee754Float64Acos:
case kIeee754Float64Acosh:
case kIeee754Float64Asin:


@ -489,16 +489,10 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
opcode |= MiscField::encode(kMemoryAccessPoisoned);
}
EmitLoad(this, node, opcode);
}
void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@ -1827,8 +1821,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
bool IsNodeUnsigned(Node* n) {
NodeMatcher m(n);
if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() ||
m.IsProtectedLoad() || m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
if (m.IsLoad() || m.IsUnalignedLoad() || m.IsProtectedLoad() ||
m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
LoadRepresentation load_rep = LoadRepresentationOf(n->op());
return load_rep.IsUnsigned();
} else {


@ -985,15 +985,6 @@ void AdjustStackPointerForTailCall(
}
}
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
S390OperandConverter const& i) {
const MemoryAccessMode access_mode = AccessModeField::decode(instr->opcode());
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
codegen->tasm()->AndP(value, kSpeculationPoisonRegister);
}
}
} // namespace
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
@ -1071,25 +1062,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne);
}
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
Register scratch = r1;
__ ComputeCodeStartAddress(scratch);
// Calculate a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
__ mov(kSpeculationPoisonRegister, Operand::Zero());
__ mov(r0, Operand(-1));
__ CmpS64(kJavaScriptCallCodeStartRegister, scratch);
__ LoadOnConditionP(eq, kSpeculationPoisonRegister, r0);
}
void CodeGenerator::AssembleRegisterArgumentPoisoning() {
__ AndP(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
__ AndP(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
__ AndP(sp, sp, kSpeculationPoisonRegister);
}
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@ -1395,10 +1367,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand(offset.offset()));
break;
}
case kArchWordPoisonOnSpeculation:
DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
__ AndP(i.InputRegister(0), kSpeculationPoisonRegister);
break;
case kS390_Peek: {
int reverse_slot = i.InputInt32(0);
int offset =
@ -2155,7 +2123,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kS390_LoadWordS8:
ASSEMBLE_LOAD_INTEGER(LoadS8);
EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_BitcastFloat32ToInt32:
ASSEMBLE_UNARY_OP(R_DInstr(MovFloatToInt), R_MInstr(LoadU32), nullInstr);
@ -2173,35 +2140,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
case kS390_LoadWordU8:
ASSEMBLE_LOAD_INTEGER(LoadU8);
EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordU16:
ASSEMBLE_LOAD_INTEGER(LoadU16);
EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordS16:
ASSEMBLE_LOAD_INTEGER(LoadS16);
EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordU32:
ASSEMBLE_LOAD_INTEGER(LoadU32);
EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordS32:
ASSEMBLE_LOAD_INTEGER(LoadS32);
EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse16:
ASSEMBLE_LOAD_INTEGER(lrvh);
EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse32:
ASSEMBLE_LOAD_INTEGER(lrv);
EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse64:
ASSEMBLE_LOAD_INTEGER(lrvg);
EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse16RR:
__ lrvr(i.OutputRegister(), i.InputRegister(0));
@ -2238,7 +2197,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kS390_LoadWord64:
ASSEMBLE_LOAD_INTEGER(lg);
EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadAndTestWord32: {
ASSEMBLE_LOADANDTEST32(ltr, lt_z);
@ -2258,7 +2216,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AddressingMode mode = kMode_None;
MemOperand operand = i.MemoryOperand(&mode);
__ vl(i.OutputSimd128Register(), operand, Condition(0));
EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
}
case kS390_StoreWord8:
@ -3541,20 +3498,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
Instruction* instr) {
// TODO(John) Handle float comparisons (kUnordered[Not]Equal).
if (condition == kUnorderedEqual || condition == kUnorderedNotEqual ||
condition == kOverflow || condition == kNotOverflow) {
return;
}
condition = NegateFlagsCondition(condition);
__ mov(r0, Operand::Zero());
__ LoadOnConditionP(FlagsConditionToCondition(condition, kArchNop),
kSpeculationPoisonRegister, r0);
}
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@ -3781,7 +3724,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
ResetSpeculationPoison();
}
const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();


@ -704,15 +704,9 @@ void InstructionSelector::VisitLoad(Node* node) {
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
opcode |= AddressingModeField::encode(mode);
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
}
Emit(opcode, 1, outputs, input_count, inputs);
}
void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();


@ -569,16 +569,6 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
#endif // V8_ENABLE_WEBASSEMBLY
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
X64OperandConverter const& i) {
const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
codegen->tasm()->andq(value, kSpeculationPoisonRegister);
}
}
} // namespace
#define ASSEMBLE_UNOP(asm_instr) \
@ -1019,22 +1009,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, not_zero);
}
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
// Set a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
__ ComputeCodeStartAddress(rbx);
__ xorq(kSpeculationPoisonRegister, kSpeculationPoisonRegister);
__ cmpq(kJavaScriptCallCodeStartRegister, rbx);
__ Move(rbx, -1);
__ cmovq(equal, kSpeculationPoisonRegister, rbx);
}
void CodeGenerator::AssembleRegisterArgumentPoisoning() {
__ andq(kJSFunctionRegister, kSpeculationPoisonRegister);
__ andq(kContextRegister, kSpeculationPoisonRegister);
__ andq(rsp, kSpeculationPoisonRegister);
}
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@ -1052,11 +1026,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineCall(reg);
} else {
__ call(reg);
}
__ call(reg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@ -1078,19 +1048,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
__ near_call(wasm_code, constant.rmode());
} else {
if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineCall(wasm_code, constant.rmode());
} else {
__ Call(wasm_code, constant.rmode());
}
__ Call(wasm_code, constant.rmode());
}
} else {
Register reg = i.InputRegister(0);
if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineCall(reg);
} else {
__ call(reg);
}
__ call(i.InputRegister(0));
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@ -1107,12 +1068,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ jmp(kScratchRegister);
}
} else {
Register reg = i.InputRegister(0);
if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
__ jmp(reg);
}
__ jmp(i.InputRegister(0));
}
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
@ -1130,11 +1086,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
__ jmp(reg);
}
__ jmp(reg);
}
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
@ -1147,11 +1099,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
__ jmp(reg);
}
__ jmp(reg);
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@ -1368,10 +1316,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DetermineStubCallMode(), kTaggedSize);
break;
}
case kArchWordPoisonOnSpeculation:
DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
__ andq(i.InputRegister(0), kSpeculationPoisonRegister);
break;
case kX64MFence:
__ mfence();
break;
@ -2180,24 +2124,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxbl);
__ AssertZeroExtended(i.OutputRegister());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxbl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxbl);
__ AssertZeroExtended(i.OutputRegister());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxbq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxbq);
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxbq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxbq);
__ AssertZeroExtended(i.OutputRegister());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movb: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@ -2214,20 +2154,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
DetermineStubCallMode(), kInt8Size);
}
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64Movsxwl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxwl);
__ AssertZeroExtended(i.OutputRegister());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxwl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxwl);
__ AssertZeroExtended(i.OutputRegister());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxwq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@ -2237,7 +2174,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxwq);
__ AssertZeroExtended(i.OutputRegister());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movw: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@ -2254,7 +2190,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
DetermineStubCallMode(), kInt16Size);
}
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64Movl:
@ -2288,12 +2223,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DetermineStubCallMode(), kInt32Size);
}
}
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxlq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxlq);
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64MovqDecompressTaggedSigned: {
CHECK(instr->HasOutput());
@ -2301,7 +2234,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ DecompressTaggedSigned(i.OutputRegister(), address);
EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
DetermineStubCallMode(), kTaggedSize);
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64MovqDecompressTaggedPointer: {
@ -2310,7 +2242,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ DecompressTaggedPointer(i.OutputRegister(), address);
EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
DetermineStubCallMode(), kTaggedSize);
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64MovqDecompressAnyTagged: {
@ -2319,7 +2250,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ DecompressAnyTagged(i.OutputRegister(), address);
EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
DetermineStubCallMode(), kTaggedSize);
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64MovqCompressTagged: {
@ -2361,7 +2291,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DetermineStubCallMode(), kInt64Size);
}
}
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movss:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@ -2376,17 +2305,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64Movsd: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
if (instr->HasOutput()) {
const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
// If we have to poison the loaded value, we load into a general
// purpose register first, mask it with the poison, and move the
// value from the general purpose register into the double register.
__ movq(kScratchRegister, i.MemoryOperand());
__ andq(kScratchRegister, kSpeculationPoisonRegister);
__ Movq(i.OutputDoubleRegister(), kScratchRegister);
} else {
__ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
}
__ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
@ -4449,19 +4368,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
Instruction* instr) {
// TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
return;
}
condition = NegateFlagsCondition(condition);
__ Move(kScratchRegister, 0);
__ cmovq(FlagsConditionToCondition(condition), kSpeculationPoisonRegister,
kScratchRegister);
}
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
Label::Distance flabel_distance =
@ -4703,7 +4609,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= static_cast<int>(osr_helper()->UnoptimizedFrameSlots());
ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
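
The kX64Movsd case removed above shows why poisoned floating-point loads needed special handling: the poison lives in a general-purpose register, so the value was first loaded into a scratch GPR, masked there, and only then moved into the XMM register. The same detour in standalone C++, with memcpy standing in for the movq/Movq moves (illustrative only, not V8 code):

#include <cstdint>
#include <cstdio>
#include <cstring>

double PoisonedLoadDouble(const double* slot, uint64_t poison) {
  uint64_t bits;
  std::memcpy(&bits, slot, sizeof(bits));       // movq kScratchRegister, [slot]
  bits &= poison;                               // andq kScratchRegister, poison
  double result;
  std::memcpy(&result, &bits, sizeof(result));  // Movq xmm_output, kScratchRegister
  return result;
}

int main() {
  double d = 3.5;
  std::printf("%f\n", PoisonedLoadDouble(&d, ~uint64_t{0}));  // 3.500000
  std::printf("%f\n", PoisonedLoadDouble(&d, 0));             // 0.000000
}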


@ -471,9 +471,6 @@ void InstructionSelector::VisitLoad(Node* node, Node* value,
InstructionCode code = opcode | AddressingModeField::encode(mode);
if (node->opcode() == IrOpcode::kProtectedLoad) {
code |= AccessModeField::encode(kMemoryAccessProtected);
} else if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
code |= AccessModeField::encode(kMemoryAccessPoisoned);
}
Emit(code, 1, outputs, input_count, inputs, temp_count, temps);
}
@ -484,8 +481,6 @@ void InstructionSelector::VisitLoad(Node* node) {
VisitLoad(node, node, GetLoadOpcode(load_rep));
}
void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitStore(Node* node) {
@ -1502,8 +1497,7 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
}
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
case IrOpcode::kProtectedLoad:
case IrOpcode::kPoisonedLoad: {
case IrOpcode::kProtectedLoad: {
// The movzxbl/movsxbl/movzxwl/movsxwl/movl operations implicitly
// zero-extend to 64-bit on x64, so the zero-extension is a no-op.
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
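
The kMemoryAccessPoisoned plumbing deleted from the instruction selectors packed the access mode into the InstructionCode through BitField-style fields such as AccessModeField and decoded it again in the code generator. A toy version of that encode/decode round trip; the field layout and names below are invented for illustration and do not match V8's real encoding:

#include <cassert>
#include <cstdint>

enum MemoryAccessMode { kMemoryAccessDirect = 0, kMemoryAccessProtected = 1 };

template <typename T, int kShift, int kBits>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kBits) - 1) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

using AccessModeField = BitField<MemoryAccessMode, 20, 2>;  // hypothetical layout

int main() {
  uint32_t opcode = 0x42;  // some architecture-specific opcode value
  opcode |= AccessModeField::encode(kMemoryAccessProtected);
  assert(AccessModeField::decode(opcode) == kMemoryAccessProtected);
  return 0;
}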


@ -135,7 +135,6 @@ Reduction BranchElimination::ReduceBranch(Node* node) {
bool condition_value;
// If we know the condition we can discard the branch.
if (from_input.LookupCondition(condition, &branch, &condition_value)) {
MarkAsSafetyCheckIfNeeded(branch, node);
for (Node* const use : node->uses()) {
switch (use->opcode()) {
case IrOpcode::kIfTrue:
@ -215,7 +214,6 @@ Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
Node* branch;
// If we know the condition we can discard the branch.
if (conditions.LookupCondition(condition, &branch, &condition_value)) {
MarkAsSafetyCheckIfNeeded(branch, node);
if (condition_is_true == condition_value) {
// We don't update the conditions here, because we're replacing {node}
// with the {control} node that already contains the right information.
@ -410,21 +408,6 @@ bool BranchElimination::ControlPathConditions::BlocksAndConditionsInvariant() {
}
#endif
void BranchElimination::MarkAsSafetyCheckIfNeeded(Node* branch, Node* node) {
// Check if {branch} is dead because we might have a stale side-table entry.
if (!branch->IsDead() && branch->opcode() != IrOpcode::kDead &&
branch->opcode() != IrOpcode::kTrapIf &&
branch->opcode() != IrOpcode::kTrapUnless) {
IsSafetyCheck branch_safety = IsSafetyCheckOf(branch->op());
IsSafetyCheck combined_safety =
CombineSafetyChecks(branch_safety, IsSafetyCheckOf(node->op()));
if (branch_safety != combined_safety) {
NodeProperties::ChangeOp(
branch, common()->MarkAsSafetyCheck(branch->op(), combined_safety));
}
}
}
Graph* BranchElimination::graph() const { return jsgraph()->graph(); }
Isolate* BranchElimination::isolate() const { return jsgraph()->isolate(); }


@ -114,7 +114,6 @@ class V8_EXPORT_PRIVATE BranchElimination final
Reduction UpdateConditions(Node* node, ControlPathConditions prev_conditions,
Node* current_condition, Node* current_branch,
bool is_true_branch, bool in_new_block);
void MarkAsSafetyCheckIfNeeded(Node* branch, Node* node);
Node* dead() const { return dead_; }
Graph* graph() const;


@ -141,9 +141,8 @@ class BytecodeGraphBuilder {
Node* NewIfDefault() { return NewNode(common()->IfDefault()); }
Node* NewMerge() { return NewNode(common()->Merge(1), true); }
Node* NewLoop() { return NewNode(common()->Loop(1), true); }
Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone,
IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck) {
return NewNode(common()->Branch(hint, is_safety_check), condition);
Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone) {
return NewNode(common()->Branch(hint), condition);
}
Node* NewSwitch(Node* condition, int control_output_count) {
return NewNode(common()->Switch(control_output_count), condition);
@ -3959,7 +3958,7 @@ void BytecodeGraphBuilder::BuildJump() {
}
void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
NewBranch(condition, BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck);
NewBranch(condition, BranchHint::kNone);
{
SubEnvironment sub_environment(this);
NewIfTrue();
@ -3971,7 +3970,7 @@ void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
}
void BytecodeGraphBuilder::BuildJumpIfNot(Node* condition) {
NewBranch(condition, BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck);
NewBranch(condition, BranchHint::kNone);
{
SubEnvironment sub_environment(this);
NewIfFalse();
@ -3997,8 +3996,7 @@ void BytecodeGraphBuilder::BuildJumpIfNotEqual(Node* comperand) {
}
void BytecodeGraphBuilder::BuildJumpIfFalse() {
NewBranch(environment()->LookupAccumulator(), BranchHint::kNone,
IsSafetyCheck::kNoSafetyCheck);
NewBranch(environment()->LookupAccumulator(), BranchHint::kNone);
{
SubEnvironment sub_environment(this);
NewIfFalse();
@ -4012,8 +4010,7 @@ void BytecodeGraphBuilder::BuildJumpIfFalse() {
}
void BytecodeGraphBuilder::BuildJumpIfTrue() {
NewBranch(environment()->LookupAccumulator(), BranchHint::kNone,
IsSafetyCheck::kNoSafetyCheck);
NewBranch(environment()->LookupAccumulator(), BranchHint::kNone);
{
SubEnvironment sub_environment(this);
NewIfTrue();


@ -48,8 +48,7 @@ static_assert(
CodeAssemblerState::CodeAssemblerState(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
CodeKind kind, const char* name, PoisoningMitigationLevel poisoning_level,
Builtin builtin)
CodeKind kind, const char* name, Builtin builtin)
// TODO(rmcilroy): Should we use Linkage::GetBytecodeDispatchDescriptor for
// bytecode handlers?
: CodeAssemblerState(
@ -57,29 +56,26 @@ CodeAssemblerState::CodeAssemblerState(
Linkage::GetStubCallDescriptor(
zone, descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties),
kind, name, poisoning_level, builtin) {}
kind, name, builtin) {}
CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
int parameter_count, CodeKind kind,
const char* name,
PoisoningMitigationLevel poisoning_level,
Builtin builtin)
const char* name, Builtin builtin)
: CodeAssemblerState(
isolate, zone,
Linkage::GetJSCallDescriptor(zone, false, parameter_count,
CallDescriptor::kCanUseRoots),
kind, name, poisoning_level, builtin) {}
kind, name, builtin) {}
CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
CallDescriptor* call_descriptor,
CodeKind kind, const char* name,
PoisoningMitigationLevel poisoning_level,
Builtin builtin)
: raw_assembler_(new RawMachineAssembler(
isolate, zone->New<Graph>(zone), call_descriptor,
MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements(), poisoning_level)),
InstructionSelector::AlignmentRequirements())),
kind_(kind),
name_(name),
builtin_(builtin),
@ -169,10 +165,6 @@ bool CodeAssembler::Word32ShiftIsSafe() const {
return raw_assembler()->machine()->Word32ShiftIsSafe();
}
PoisoningMitigationLevel CodeAssembler::poisoning_level() const {
return raw_assembler()->poisoning_level();
}
// static
Handle<Code> CodeAssembler::GenerateCode(
CodeAssemblerState* state, const AssemblerOptions& options,
@ -187,7 +179,7 @@ Handle<Code> CodeAssembler::GenerateCode(
code = Pipeline::GenerateCodeForCodeStub(
rasm->isolate(), rasm->call_descriptor(), graph, state->jsgraph_,
rasm->source_positions(), state->kind_, state->name_,
state->builtin_, rasm->poisoning_level(), options, profile_data)
state->builtin_, options, profile_data)
.ToHandleChecked();
state->code_generated_ = true;
@ -565,15 +557,6 @@ TNode<RawPtrT> CodeAssembler::LoadParentFramePointer() {
return UncheckedCast<RawPtrT>(raw_assembler()->LoadParentFramePointer());
}
TNode<Object> CodeAssembler::TaggedPoisonOnSpeculation(TNode<Object> value) {
return UncheckedCast<Object>(
raw_assembler()->TaggedPoisonOnSpeculation(value));
}
TNode<WordT> CodeAssembler::WordPoisonOnSpeculation(TNode<WordT> value) {
return UncheckedCast<WordT>(raw_assembler()->WordPoisonOnSpeculation(value));
}
#define DEFINE_CODE_ASSEMBLER_BINARY_OP(name, ResType, Arg1Type, Arg2Type) \
TNode<ResType> CodeAssembler::name(TNode<Arg1Type> a, TNode<Arg2Type> b) { \
return UncheckedCast<ResType>(raw_assembler()->name(a, b)); \
@ -677,27 +660,23 @@ TNode<Int32T> CodeAssembler::TruncateFloat32ToInt32(TNode<Float32T> value) {
CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
#undef DEFINE_CODE_ASSEMBLER_UNARY_OP
Node* CodeAssembler::Load(MachineType type, Node* base,
LoadSensitivity needs_poisoning) {
return raw_assembler()->Load(type, base, needs_poisoning);
Node* CodeAssembler::Load(MachineType type, Node* base) {
return raw_assembler()->Load(type, base);
}
Node* CodeAssembler::Load(MachineType type, Node* base, Node* offset,
LoadSensitivity needs_poisoning) {
return raw_assembler()->Load(type, base, offset, needs_poisoning);
Node* CodeAssembler::Load(MachineType type, Node* base, Node* offset) {
return raw_assembler()->Load(type, base, offset);
}
TNode<Object> CodeAssembler::LoadFullTagged(Node* base,
LoadSensitivity needs_poisoning) {
return BitcastWordToTagged(Load<RawPtrT>(base, needs_poisoning));
TNode<Object> CodeAssembler::LoadFullTagged(Node* base) {
return BitcastWordToTagged(Load<RawPtrT>(base));
}
TNode<Object> CodeAssembler::LoadFullTagged(Node* base, TNode<IntPtrT> offset,
LoadSensitivity needs_poisoning) {
TNode<Object> CodeAssembler::LoadFullTagged(Node* base, TNode<IntPtrT> offset) {
// Please use LoadFromObject(MachineType::MapInHeader(), object,
// IntPtrConstant(-kHeapObjectTag)) instead.
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
return BitcastWordToTagged(Load<RawPtrT>(base, offset, needs_poisoning));
return BitcastWordToTagged(Load<RawPtrT>(base, offset));
}
Node* CodeAssembler::AtomicLoad(MachineType type, TNode<RawPtrT> base,


@ -725,32 +725,22 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<RawPtrT> LoadFramePointer();
TNode<RawPtrT> LoadParentFramePointer();
// Poison |value| on speculative paths.
TNode<Object> TaggedPoisonOnSpeculation(TNode<Object> value);
TNode<WordT> WordPoisonOnSpeculation(TNode<WordT> value);
// Load raw memory location.
Node* Load(MachineType type, Node* base,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
Node* Load(MachineType type, Node* base);
template <class Type>
TNode<Type> Load(MachineType type, TNode<RawPtr<Type>> base) {
DCHECK(
IsSubtype(type.representation(), MachineRepresentationOf<Type>::value));
return UncheckedCast<Type>(Load(type, static_cast<Node*>(base)));
}
Node* Load(MachineType type, Node* base, Node* offset,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
Node* Load(MachineType type, Node* base, Node* offset);
template <class Type>
TNode<Type> Load(Node* base,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
return UncheckedCast<Type>(
Load(MachineTypeOf<Type>::value, base, needs_poisoning));
TNode<Type> Load(Node* base) {
return UncheckedCast<Type>(Load(MachineTypeOf<Type>::value, base));
}
template <class Type>
TNode<Type> Load(Node* base, TNode<WordT> offset,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
return UncheckedCast<Type>(
Load(MachineTypeOf<Type>::value, base, offset, needs_poisoning));
TNode<Type> Load(Node* base, TNode<WordT> offset) {
return UncheckedCast<Type>(Load(MachineTypeOf<Type>::value, base, offset));
}
template <class Type>
TNode<Type> AtomicLoad(TNode<RawPtrT> base, TNode<WordT> offset) {
@ -761,11 +751,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<Type> AtomicLoad64(TNode<RawPtrT> base, TNode<WordT> offset);
// Load uncompressed tagged value from (most likely off JS heap) memory
// location.
TNode<Object> LoadFullTagged(
Node* base, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
TNode<Object> LoadFullTagged(
Node* base, TNode<IntPtrT> offset,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
TNode<Object> LoadFullTagged(Node* base);
TNode<Object> LoadFullTagged(Node* base, TNode<IntPtrT> offset);
Node* LoadFromObject(MachineType type, TNode<Object> object,
TNode<IntPtrT> offset);
@ -1312,7 +1299,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void UnregisterCallGenerationCallbacks();
bool Word32ShiftIsSafe() const;
PoisoningMitigationLevel poisoning_level() const;
bool IsJSFunctionCall() const;
@ -1595,13 +1581,11 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
// TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
CodeAssemblerState(Isolate* isolate, Zone* zone,
const CallInterfaceDescriptor& descriptor, CodeKind kind,
const char* name, PoisoningMitigationLevel poisoning_level,
Builtin builtin = Builtin::kNoBuiltinId);
const char* name, Builtin builtin = Builtin::kNoBuiltinId);
// Create with JSCall linkage.
CodeAssemblerState(Isolate* isolate, Zone* zone, int parameter_count,
CodeKind kind, const char* name,
PoisoningMitigationLevel poisoning_level,
Builtin builtin = Builtin::kNoBuiltinId);
~CodeAssemblerState();
@ -1628,8 +1612,7 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
CodeAssemblerState(Isolate* isolate, Zone* zone,
CallDescriptor* call_descriptor, CodeKind kind,
const char* name, PoisoningMitigationLevel poisoning_level,
Builtin builtin);
const char* name, Builtin builtin);
void PushExceptionHandler(CodeAssemblerExceptionHandlerLabel* label);
void PopExceptionHandler();


@ -28,18 +28,6 @@ std::ostream& operator<<(std::ostream& os, BranchHint hint) {
UNREACHABLE();
}
std::ostream& operator<<(std::ostream& os, IsSafetyCheck is_safety_check) {
switch (is_safety_check) {
case IsSafetyCheck::kCriticalSafetyCheck:
return os << "CriticalSafetyCheck";
case IsSafetyCheck::kSafetyCheck:
return os << "SafetyCheck";
case IsSafetyCheck::kNoSafetyCheck:
return os << "NoSafetyCheck";
}
UNREACHABLE();
}
std::ostream& operator<<(std::ostream& os, TrapId trap_id) {
switch (trap_id) {
#define TRAP_CASE(Name) \
@ -59,22 +47,12 @@ TrapId TrapIdOf(const Operator* const op) {
return OpParameter<TrapId>(op);
}
std::ostream& operator<<(std::ostream& os, BranchOperatorInfo info) {
return os << info.hint << ", " << info.is_safety_check;
}
const BranchOperatorInfo& BranchOperatorInfoOf(const Operator* const op) {
DCHECK_EQ(IrOpcode::kBranch, op->opcode());
return OpParameter<BranchOperatorInfo>(op);
}
BranchHint BranchHintOf(const Operator* const op) {
switch (op->opcode()) {
case IrOpcode::kBranch:
return BranchOperatorInfoOf(op).hint;
case IrOpcode::kIfValue:
return IfValueParametersOf(op).hint();
case IrOpcode::kIfDefault:
case IrOpcode::kBranch:
return OpParameter<BranchHint>(op);
default:
UNREACHABLE();
@ -90,8 +68,7 @@ int ValueInputCountOfReturn(Operator const* const op) {
bool operator==(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
return lhs.kind() == rhs.kind() && lhs.reason() == rhs.reason() &&
lhs.feedback() == rhs.feedback() &&
lhs.is_safety_check() == rhs.is_safety_check();
lhs.feedback() == rhs.feedback();
}
bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
@ -100,13 +77,11 @@ bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
size_t hash_value(DeoptimizeParameters p) {
FeedbackSource::Hash feebdack_hash;
return base::hash_combine(p.kind(), p.reason(), feebdack_hash(p.feedback()),
p.is_safety_check());
return base::hash_combine(p.kind(), p.reason(), feebdack_hash(p.feedback()));
}
std::ostream& operator<<(std::ostream& os, DeoptimizeParameters p) {
return os << p.kind() << ", " << p.reason() << ", " << p.is_safety_check()
<< ", " << p.feedback();
return os << p.kind() << ", " << p.reason() << ", " << p.feedback();
}
DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
@ -117,32 +92,6 @@ DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
return OpParameter<DeoptimizeParameters>(op);
}
IsSafetyCheck IsSafetyCheckOf(const Operator* op) {
if (op->opcode() == IrOpcode::kBranch) {
return BranchOperatorInfoOf(op).is_safety_check;
}
return DeoptimizeParametersOf(op).is_safety_check();
}
const Operator* CommonOperatorBuilder::MarkAsSafetyCheck(
const Operator* op, IsSafetyCheck safety_check) {
if (op->opcode() == IrOpcode::kBranch) {
BranchOperatorInfo info = BranchOperatorInfoOf(op);
if (info.is_safety_check == safety_check) return op;
return Branch(info.hint, safety_check);
}
DeoptimizeParameters p = DeoptimizeParametersOf(op);
if (p.is_safety_check() == safety_check) return op;
switch (op->opcode()) {
case IrOpcode::kDeoptimizeIf:
return DeoptimizeIf(p.kind(), p.reason(), p.feedback(), safety_check);
case IrOpcode::kDeoptimizeUnless:
return DeoptimizeUnless(p.kind(), p.reason(), p.feedback(), safety_check);
default:
UNREACHABLE();
}
}
const Operator* CommonOperatorBuilder::DelayedStringConstant(
const StringConstantBase* str) {
return zone()->New<Operator1<const StringConstantBase*>>(
@ -478,16 +427,10 @@ IfValueParameters const& IfValueParametersOf(const Operator* op) {
#define CACHED_LOOP_EXIT_VALUE_LIST(V) V(kTagged)
#define CACHED_BRANCH_LIST(V) \
V(None, CriticalSafetyCheck) \
V(True, CriticalSafetyCheck) \
V(False, CriticalSafetyCheck) \
V(None, SafetyCheck) \
V(True, SafetyCheck) \
V(False, SafetyCheck) \
V(None, NoSafetyCheck) \
V(True, NoSafetyCheck) \
V(False, NoSafetyCheck)
#define CACHED_BRANCH_LIST(V) \
V(None) \
V(True) \
V(False)
#define CACHED_RETURN_LIST(V) \
V(1) \
@ -541,28 +484,22 @@ IfValueParameters const& IfValueParametersOf(const Operator* op) {
V(Soft, InsufficientTypeFeedbackForGenericKeyedAccess) \
V(Soft, InsufficientTypeFeedbackForGenericNamedAccess)
#define CACHED_DEOPTIMIZE_IF_LIST(V) \
V(Eager, DivisionByZero, NoSafetyCheck) \
V(Eager, DivisionByZero, SafetyCheck) \
V(Eager, Hole, NoSafetyCheck) \
V(Eager, Hole, SafetyCheck) \
V(Eager, MinusZero, NoSafetyCheck) \
V(Eager, MinusZero, SafetyCheck) \
V(Eager, Overflow, NoSafetyCheck) \
V(Eager, Overflow, SafetyCheck) \
V(Eager, Smi, SafetyCheck)
#define CACHED_DEOPTIMIZE_IF_LIST(V) \
V(Eager, DivisionByZero) \
V(Eager, Hole) \
V(Eager, MinusZero) \
V(Eager, Overflow) \
V(Eager, Smi)
#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
V(Eager, LostPrecision, NoSafetyCheck) \
V(Eager, LostPrecision, SafetyCheck) \
V(Eager, LostPrecisionOrNaN, NoSafetyCheck) \
V(Eager, LostPrecisionOrNaN, SafetyCheck) \
V(Eager, NotAHeapNumber, SafetyCheck) \
V(Eager, NotANumberOrOddball, SafetyCheck) \
V(Eager, NotASmi, SafetyCheck) \
V(Eager, OutOfBounds, SafetyCheck) \
V(Eager, WrongInstanceType, SafetyCheck) \
V(Eager, WrongMap, SafetyCheck)
#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
V(Eager, LostPrecision) \
V(Eager, LostPrecisionOrNaN) \
V(Eager, NotAHeapNumber) \
V(Eager, NotANumberOrOddball) \
V(Eager, NotASmi) \
V(Eager, OutOfBounds) \
V(Eager, WrongInstanceType) \
V(Eager, WrongMap)
#define CACHED_DYNAMIC_CHECK_MAPS_LIST(V) \
V(DynamicCheckMaps) \
@ -668,18 +605,17 @@ struct CommonOperatorGlobalCache final {
CACHED_RETURN_LIST(CACHED_RETURN)
#undef CACHED_RETURN
template <BranchHint hint, IsSafetyCheck is_safety_check>
struct BranchOperator final : public Operator1<BranchOperatorInfo> {
template <BranchHint hint>
struct BranchOperator final : public Operator1<BranchHint> {
BranchOperator()
: Operator1<BranchOperatorInfo>( // --
IrOpcode::kBranch, Operator::kKontrol, // opcode
"Branch", // name
1, 0, 1, 0, 0, 2, // counts
BranchOperatorInfo{hint, is_safety_check}) {} // parameter
: Operator1<BranchHint>( // --
IrOpcode::kBranch, Operator::kKontrol, // opcode
"Branch", // name
1, 0, 1, 0, 0, 2, // counts
hint) {} // parameter
};
#define CACHED_BRANCH(Hint, IsCheck) \
BranchOperator<BranchHint::k##Hint, IsSafetyCheck::k##IsCheck> \
kBranch##Hint##IsCheck##Operator;
#define CACHED_BRANCH(Hint) \
BranchOperator<BranchHint::k##Hint> kBranch##Hint##Operator;
CACHED_BRANCH_LIST(CACHED_BRANCH)
#undef CACHED_BRANCH
@ -757,8 +693,7 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"Deoptimize", // name
1, 1, 1, 0, 0, 1, // counts
DeoptimizeParameters(kKind, kReason, FeedbackSource(),
IsSafetyCheck::kNoSafetyCheck)) {}
DeoptimizeParameters(kKind, kReason, FeedbackSource())) {}
};
#define CACHED_DEOPTIMIZE(Kind, Reason) \
DeoptimizeOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
@ -766,8 +701,7 @@ struct CommonOperatorGlobalCache final {
CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
#undef CACHED_DEOPTIMIZE
template <DeoptimizeKind kKind, DeoptimizeReason kReason,
IsSafetyCheck is_safety_check>
template <DeoptimizeKind kKind, DeoptimizeReason kReason>
struct DeoptimizeIfOperator final : public Operator1<DeoptimizeParameters> {
DeoptimizeIfOperator()
: Operator1<DeoptimizeParameters>( // --
@ -775,18 +709,15 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeIf", // name
2, 1, 1, 0, 1, 1, // counts
DeoptimizeParameters(kKind, kReason, FeedbackSource(),
is_safety_check)) {}
DeoptimizeParameters(kKind, kReason, FeedbackSource())) {}
};
#define CACHED_DEOPTIMIZE_IF(Kind, Reason, IsCheck) \
DeoptimizeIfOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason, \
IsSafetyCheck::k##IsCheck> \
kDeoptimizeIf##Kind##Reason##IsCheck##Operator;
#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
DeoptimizeIfOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
kDeoptimizeIf##Kind##Reason##Operator;
CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
#undef CACHED_DEOPTIMIZE_IF
template <DeoptimizeKind kKind, DeoptimizeReason kReason,
IsSafetyCheck is_safety_check>
template <DeoptimizeKind kKind, DeoptimizeReason kReason>
struct DeoptimizeUnlessOperator final
: public Operator1<DeoptimizeParameters> {
DeoptimizeUnlessOperator()
@ -795,14 +726,12 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeUnless", // name
2, 1, 1, 0, 1, 1, // counts
DeoptimizeParameters(kKind, kReason, FeedbackSource(),
is_safety_check)) {}
DeoptimizeParameters(kKind, kReason, FeedbackSource())) {}
};
#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason, IsCheck) \
#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
DeoptimizeUnlessOperator<DeoptimizeKind::k##Kind, \
DeoptimizeReason::k##Reason, \
IsSafetyCheck::k##IsCheck> \
kDeoptimizeUnless##Kind##Reason##IsCheck##Operator;
DeoptimizeReason::k##Reason> \
kDeoptimizeUnless##Kind##Reason##Operator;
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
@ -815,8 +744,7 @@ struct CommonOperatorGlobalCache final {
"DynamicCheckMapsWithDeoptUnless", // name
6, 1, 1, 0, 1, 1, // counts
DeoptimizeParameters(DeoptimizeKind::kEagerWithResume, kReason,
FeedbackSource(),
IsSafetyCheck::kCriticalSafetyCheck)) {}
FeedbackSource())) {}
};
#define CACHED_DYNAMIC_CHECK_MAPS(Reason) \
DynamicMapCheckOperator<DeoptimizeReason::k##Reason> k##Reason##Operator;
@ -985,12 +913,10 @@ const Operator* CommonOperatorBuilder::StaticAssert(const char* source) {
1, 0, source);
}
const Operator* CommonOperatorBuilder::Branch(BranchHint hint,
IsSafetyCheck is_safety_check) {
#define CACHED_BRANCH(Hint, IsCheck) \
if (hint == BranchHint::k##Hint && \
is_safety_check == IsSafetyCheck::k##IsCheck) { \
return &cache_.kBranch##Hint##IsCheck##Operator; \
const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
#define CACHED_BRANCH(Hint) \
if (hint == BranchHint::k##Hint) { \
return &cache_.kBranch##Hint##Operator; \
}
CACHED_BRANCH_LIST(CACHED_BRANCH)
#undef CACHED_BRANCH
@ -1008,8 +934,7 @@ const Operator* CommonOperatorBuilder::Deoptimize(
CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
#undef CACHED_DEOPTIMIZE
// Uncached
DeoptimizeParameters parameter(kind, reason, feedback,
IsSafetyCheck::kNoSafetyCheck);
DeoptimizeParameters parameter(kind, reason, feedback);
return zone()->New<Operator1<DeoptimizeParameters>>( // --
IrOpcode::kDeoptimize, // opcodes
Operator::kFoldable | Operator::kNoThrow, // properties
@ -1020,17 +945,16 @@ const Operator* CommonOperatorBuilder::Deoptimize(
const Operator* CommonOperatorBuilder::DeoptimizeIf(
DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback, IsSafetyCheck is_safety_check) {
#define CACHED_DEOPTIMIZE_IF(Kind, Reason, IsCheck) \
if (kind == DeoptimizeKind::k##Kind && \
reason == DeoptimizeReason::k##Reason && \
is_safety_check == IsSafetyCheck::k##IsCheck && !feedback.IsValid()) { \
return &cache_.kDeoptimizeIf##Kind##Reason##IsCheck##Operator; \
FeedbackSource const& feedback) {
#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
if (kind == DeoptimizeKind::k##Kind && \
reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
return &cache_.kDeoptimizeIf##Kind##Reason##Operator; \
}
CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
#undef CACHED_DEOPTIMIZE_IF
// Uncached
DeoptimizeParameters parameter(kind, reason, feedback, is_safety_check);
DeoptimizeParameters parameter(kind, reason, feedback);
return zone()->New<Operator1<DeoptimizeParameters>>( // --
IrOpcode::kDeoptimizeIf, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@ -1041,17 +965,16 @@ const Operator* CommonOperatorBuilder::DeoptimizeIf(
const Operator* CommonOperatorBuilder::DeoptimizeUnless(
DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback, IsSafetyCheck is_safety_check) {
#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason, IsCheck) \
if (kind == DeoptimizeKind::k##Kind && \
reason == DeoptimizeReason::k##Reason && \
is_safety_check == IsSafetyCheck::k##IsCheck && !feedback.IsValid()) { \
return &cache_.kDeoptimizeUnless##Kind##Reason##IsCheck##Operator; \
FeedbackSource const& feedback) {
#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
if (kind == DeoptimizeKind::k##Kind && \
reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
return &cache_.kDeoptimizeUnless##Kind##Reason##Operator; \
}
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
// Uncached
DeoptimizeParameters parameter(kind, reason, feedback, is_safety_check);
DeoptimizeParameters parameter(kind, reason, feedback);
return zone()->New<Operator1<DeoptimizeParameters>>( // --
IrOpcode::kDeoptimizeUnless, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@ -1664,17 +1587,6 @@ const FrameStateInfo& FrameStateInfoOf(const Operator* op) {
return OpParameter<FrameStateInfo>(op);
}
IsSafetyCheck CombineSafetyChecks(IsSafetyCheck a, IsSafetyCheck b) {
if (a == IsSafetyCheck::kCriticalSafetyCheck ||
b == IsSafetyCheck::kCriticalSafetyCheck) {
return IsSafetyCheck::kCriticalSafetyCheck;
}
if (a == IsSafetyCheck::kSafetyCheck || b == IsSafetyCheck::kSafetyCheck) {
return IsSafetyCheck::kSafetyCheck;
}
return IsSafetyCheck::kNoSafetyCheck;
}
#undef COMMON_CACHED_OP_LIST
#undef CACHED_BRANCH_LIST
#undef CACHED_RETURN_LIST
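
Aside: the CACHED_DEOPTIMIZE_* machinery above keeps one statically allocated operator per combination of template-enum parameters, so dropping IsSafetyCheck from the template argument list shrinks both the cache key and the number of instantiations. A minimal, standalone sketch of that pattern (not V8 code; the enums and the lookup are simplified stand-ins):

#include <cstdio>

enum class DeoptimizeKind { kEager, kSoft };
enum class DeoptimizeReason { kOutOfBounds, kWrongMap };

struct Operator {
  DeoptimizeKind kind;
  DeoptimizeReason reason;
};

// One cached singleton per (kind, reason) pair -- the two enums that remain
// in the cache key once IsSafetyCheck is gone.
template <DeoptimizeKind kKind, DeoptimizeReason kReason>
struct DeoptimizeIfOperator {
  static const Operator* Get() {
    static const Operator op{kKind, kReason};
    return &op;
  }
};

const Operator* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason) {
  // Mirrors the CACHED_DEOPTIMIZE_IF lookup: hand out the cached singleton
  // when the parameters match a cached pair; the uncached path is elided.
  if (kind == DeoptimizeKind::kEager &&
      reason == DeoptimizeReason::kWrongMap) {
    return DeoptimizeIfOperator<DeoptimizeKind::kEager,
                                DeoptimizeReason::kWrongMap>::Get();
  }
  return nullptr;
}

int main() {
  const Operator* a =
      DeoptimizeIf(DeoptimizeKind::kEager, DeoptimizeReason::kWrongMap);
  const Operator* b =
      DeoptimizeIf(DeoptimizeKind::kEager, DeoptimizeReason::kWrongMap);
  std::printf("same cached operator: %d\n", a == b);  // prints 1
}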

View File

@ -51,20 +51,6 @@ inline size_t hash_value(BranchHint hint) { return static_cast<size_t>(hint); }
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BranchHint);
enum class IsSafetyCheck : uint8_t {
kCriticalSafetyCheck,
kSafetyCheck,
kNoSafetyCheck
};
// Get the more critical safety check of the two arguments.
IsSafetyCheck CombineSafetyChecks(IsSafetyCheck, IsSafetyCheck);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, IsSafetyCheck);
inline size_t hash_value(IsSafetyCheck is_safety_check) {
return static_cast<size_t>(is_safety_check);
}
enum class TrapId : uint32_t {
#define DEF_ENUM(Name, ...) k##Name,
FOREACH_WASM_TRAPREASON(DEF_ENUM)
@ -78,24 +64,6 @@ std::ostream& operator<<(std::ostream&, TrapId trap_id);
TrapId TrapIdOf(const Operator* const op);
struct BranchOperatorInfo {
BranchHint hint;
IsSafetyCheck is_safety_check;
};
inline size_t hash_value(const BranchOperatorInfo& info) {
return base::hash_combine(info.hint, info.is_safety_check);
}
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BranchOperatorInfo);
inline bool operator==(const BranchOperatorInfo& a,
const BranchOperatorInfo& b) {
return a.hint == b.hint && a.is_safety_check == b.is_safety_check;
}
V8_EXPORT_PRIVATE const BranchOperatorInfo& BranchOperatorInfoOf(
const Operator* const) V8_WARN_UNUSED_RESULT;
V8_EXPORT_PRIVATE BranchHint BranchHintOf(const Operator* const)
V8_WARN_UNUSED_RESULT;
@ -106,23 +74,17 @@ int ValueInputCountOfReturn(Operator const* const op);
class DeoptimizeParameters final {
public:
DeoptimizeParameters(DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback,
IsSafetyCheck is_safety_check)
: kind_(kind),
reason_(reason),
feedback_(feedback),
is_safety_check_(is_safety_check) {}
FeedbackSource const& feedback)
: kind_(kind), reason_(reason), feedback_(feedback) {}
DeoptimizeKind kind() const { return kind_; }
DeoptimizeReason reason() const { return reason_; }
const FeedbackSource& feedback() const { return feedback_; }
IsSafetyCheck is_safety_check() const { return is_safety_check_; }
private:
DeoptimizeKind const kind_;
DeoptimizeReason const reason_;
FeedbackSource const feedback_;
IsSafetyCheck is_safety_check_;
};
bool operator==(DeoptimizeParameters, DeoptimizeParameters);
@ -135,8 +97,6 @@ std::ostream& operator<<(std::ostream&, DeoptimizeParameters p);
DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const)
V8_WARN_UNUSED_RESULT;
IsSafetyCheck IsSafetyCheckOf(const Operator* op) V8_WARN_UNUSED_RESULT;
class SelectParameters final {
public:
explicit SelectParameters(MachineRepresentation representation,
@ -479,8 +439,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* Unreachable();
const Operator* StaticAssert(const char* source);
const Operator* End(size_t control_input_count);
const Operator* Branch(BranchHint = BranchHint::kNone,
IsSafetyCheck = IsSafetyCheck::kSafetyCheck);
const Operator* Branch(BranchHint = BranchHint::kNone);
const Operator* IfTrue();
const Operator* IfFalse();
const Operator* IfSuccess();
@ -492,14 +451,10 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* Throw();
const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback);
const Operator* DeoptimizeIf(
DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback,
IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
const Operator* DeoptimizeUnless(
DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback,
IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
const Operator* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback);
const Operator* DeoptimizeUnless(DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback);
// DynamicCheckMapsWithDeoptUnless will call the dynamic map check builtin if
// the condition is false, which may then either deoptimize or resume
// execution.
@ -577,9 +532,6 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const wasm::FunctionSig* signature);
#endif // V8_ENABLE_WEBASSEMBLY
const Operator* MarkAsSafetyCheck(const Operator* op,
IsSafetyCheck safety_check);
const Operator* DelayedStringConstant(const StringConstantBase* str);
private:

View File

@ -15,8 +15,7 @@ namespace {
bool IsMachineLoad(Node* const node) {
const IrOpcode::Value opcode = node->opcode();
return opcode == IrOpcode::kLoad || opcode == IrOpcode::kPoisonedLoad ||
opcode == IrOpcode::kProtectedLoad ||
return opcode == IrOpcode::kLoad || opcode == IrOpcode::kProtectedLoad ||
opcode == IrOpcode::kUnalignedLoad ||
opcode == IrOpcode::kLoadImmutable;
}
@ -212,10 +211,6 @@ void DecompressionOptimizer::ChangeLoad(Node* const node) {
NodeProperties::ChangeOp(node,
machine()->LoadImmutable(compressed_load_rep));
break;
case IrOpcode::kPoisonedLoad:
NodeProperties::ChangeOp(node,
machine()->PoisonedLoad(compressed_load_rep));
break;
case IrOpcode::kProtectedLoad:
NodeProperties::ChangeOp(node,
machine()->ProtectedLoad(compressed_load_rep));

View File

@ -36,7 +36,6 @@ namespace internal {
namespace compiler {
enum class MaintainSchedule { kMaintain, kDiscard };
enum class MaskArrayIndexEnable { kDoNotMaskArrayIndex, kMaskArrayIndex };
class EffectControlLinearizer {
public:
@ -44,13 +43,11 @@ class EffectControlLinearizer {
JSGraphAssembler* graph_assembler, Zone* temp_zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
MaskArrayIndexEnable mask_array_index,
MaintainSchedule maintain_schedule,
JSHeapBroker* broker)
: js_graph_(js_graph),
schedule_(schedule),
temp_zone_(temp_zone),
mask_array_index_(mask_array_index),
maintain_schedule_(maintain_schedule),
source_positions_(source_positions),
node_origins_(node_origins),
@ -80,7 +77,6 @@ class EffectControlLinearizer {
Node* LowerChangeTaggedToUint32(Node* node);
Node* LowerChangeTaggedToInt64(Node* node);
Node* LowerChangeTaggedToTaggedSigned(Node* node);
Node* LowerPoisonIndex(Node* node);
Node* LowerCheckInternalizedString(Node* node, Node* frame_state);
void LowerCheckMaps(Node* node, Node* frame_state);
void LowerDynamicCheckMaps(Node* node, Node* frame_state);
@ -338,7 +334,6 @@ class EffectControlLinearizer {
JSGraph* js_graph_;
Schedule* schedule_;
Zone* temp_zone_;
MaskArrayIndexEnable mask_array_index_;
MaintainSchedule maintain_schedule_;
RegionObservability region_observability_ = RegionObservability::kObservable;
SourcePositionTable* source_positions_;
@ -966,9 +961,6 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kTruncateTaggedToFloat64:
result = LowerTruncateTaggedToFloat64(node);
break;
case IrOpcode::kPoisonIndex:
result = LowerPoisonIndex(node);
break;
case IrOpcode::kCheckClosure:
result = LowerCheckClosure(node, frame_state);
break;
@ -1788,14 +1780,6 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node) {
return done.PhiAt(0);
}
Node* EffectControlLinearizer::LowerPoisonIndex(Node* node) {
Node* index = node->InputAt(0);
if (mask_array_index_ == MaskArrayIndexEnable::kMaskArrayIndex) {
index = __ Word32PoisonOnSpeculation(index);
}
return index;
}
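
Aside: for context on what the removed lowering did, LowerPoisonIndex masked an already-bounds-checked index with a speculation poison word before it reached the load. A standalone conceptual sketch (not V8 code), assuming a poison value that is all-ones on the architectural path and zero under misspeculation:

#include <cstdint>
#include <cstdio>

uint32_t LoadElement(const uint32_t* table, uint32_t size, uint32_t index,
                     uint32_t poison /* ~0u on the architectural path */) {
  if (index < size) {
    index &= poison;      // Word32PoisonOnSpeculation equivalent
    return table[index];  // a speculative OOB read is forced to table[0]
  }
  return 0;
}

int main() {
  uint32_t table[4] = {1, 2, 3, 4};
  std::printf("%u\n", LoadElement(table, 4, 2, ~0u));  // prints 3
}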
Node* EffectControlLinearizer::LowerCheckClosure(Node* node,
Node* frame_state) {
Handle<FeedbackCell> feedback_cell = FeedbackCellOf(node->op());
@ -1831,8 +1815,7 @@ void EffectControlLinearizer::MigrateInstanceOrDeopt(
__ Word32And(bitfield3,
__ Int32Constant(Map::Bits3::IsDeprecatedBit::kMask)),
__ Int32Constant(0));
__ DeoptimizeIf(reason, feedback_source, is_not_deprecated, frame_state,
IsSafetyCheck::kCriticalSafetyCheck);
__ DeoptimizeIf(reason, feedback_source, is_not_deprecated, frame_state);
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kTryMigrateInstance;
auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
@ -1842,7 +1825,7 @@ void EffectControlLinearizer::MigrateInstanceOrDeopt(
__ Int32Constant(1), __ NoContextConstant());
Node* check = ObjectIsSmi(result);
__ DeoptimizeIf(DeoptimizeReason::kInstanceMigrationFailed, feedback_source,
check, frame_state, IsSafetyCheck::kCriticalSafetyCheck);
check, frame_state);
}
void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
@ -1886,7 +1869,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
Node* check = __ TaggedEqual(value_map, map);
if (i == map_count - 1) {
__ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
frame_state, IsSafetyCheck::kCriticalSafetyCheck);
frame_state);
} else {
auto next_map = __ MakeLabel();
__ BranchWithCriticalSafetyCheck(check, &done, &next_map);
@ -1908,7 +1891,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
if (i == map_count - 1) {
__ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
frame_state, IsSafetyCheck::kCriticalSafetyCheck);
frame_state);
} else {
auto next_map = __ MakeLabel();
__ BranchWithCriticalSafetyCheck(check, &done, &next_map);
@ -2528,8 +2511,8 @@ Node* EffectControlLinearizer::LowerCheckedUint32Bounds(Node* node,
Node* check = __ Uint32LessThan(index, limit);
if (!(params.flags() & CheckBoundsFlag::kAbortOnOutOfBounds)) {
__ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds,
params.check_parameters().feedback(), check, frame_state,
IsSafetyCheck::kCriticalSafetyCheck);
params.check_parameters().feedback(), check,
frame_state);
} else {
auto if_abort = __ MakeDeferredLabel();
auto done = __ MakeLabel();
@ -2574,8 +2557,8 @@ Node* EffectControlLinearizer::LowerCheckedUint64Bounds(Node* node,
Node* check = __ Uint64LessThan(index, limit);
if (!(params.flags() & CheckBoundsFlag::kAbortOnOutOfBounds)) {
__ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds,
params.check_parameters().feedback(), check, frame_state,
IsSafetyCheck::kCriticalSafetyCheck);
params.check_parameters().feedback(), check,
frame_state);
} else {
auto if_abort = __ MakeDeferredLabel();
auto done = __ MakeLabel();
@ -5776,8 +5759,7 @@ Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
Node* data_ptr = BuildTypedArrayDataPointer(base, external);
// Perform the actual typed element access.
return __ LoadElement(AccessBuilder::ForTypedArrayElement(
array_type, true, LoadSensitivity::kCritical),
return __ LoadElement(AccessBuilder::ForTypedArrayElement(array_type, true),
data_ptr, index);
}
@ -6796,26 +6778,13 @@ Node* EffectControlLinearizer::BuildIsClearedWeakReference(Node* maybe_object) {
#undef __
namespace {
MaskArrayIndexEnable MaskArrayForPoisonLevel(
PoisoningMitigationLevel poison_level) {
return (poison_level != PoisoningMitigationLevel::kDontPoison)
? MaskArrayIndexEnable::kMaskArrayIndex
: MaskArrayIndexEnable::kDoNotMaskArrayIndex;
}
} // namespace
void LinearizeEffectControl(JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
PoisoningMitigationLevel poison_level,
JSHeapBroker* broker) {
JSGraphAssembler graph_assembler_(graph, temp_zone, base::nullopt, nullptr);
EffectControlLinearizer linearizer(graph, schedule, &graph_assembler_,
temp_zone, source_positions, node_origins,
MaskArrayForPoisonLevel(poison_level),
MaintainSchedule::kDiscard, broker);
linearizer.Run();
}
@ -6824,16 +6793,13 @@ void LowerToMachineSchedule(JSGraph* js_graph, Schedule* schedule,
Zone* temp_zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
PoisoningMitigationLevel poison_level,
JSHeapBroker* broker) {
JSGraphAssembler graph_assembler(js_graph, temp_zone, base::nullopt,
schedule);
EffectControlLinearizer linearizer(js_graph, schedule, &graph_assembler,
temp_zone, source_positions, node_origins,
MaskArrayForPoisonLevel(poison_level),
MaintainSchedule::kMaintain, broker);
MemoryLowering memory_lowering(js_graph, temp_zone, &graph_assembler,
poison_level);
MemoryLowering memory_lowering(js_graph, temp_zone, &graph_assembler);
SelectLowering select_lowering(&graph_assembler, js_graph->graph());
graph_assembler.AddInlineReducer(&memory_lowering);
graph_assembler.AddInlineReducer(&select_lowering);

View File

@ -26,7 +26,7 @@ class JSHeapBroker;
V8_EXPORT_PRIVATE void LinearizeEffectControl(
JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
PoisoningMitigationLevel poison_level, JSHeapBroker* broker);
JSHeapBroker* broker);
// Performs effect control linearization lowering in addition to machine
// lowering, producing a scheduled graph that is ready for instruction
@ -34,7 +34,7 @@ V8_EXPORT_PRIVATE void LinearizeEffectControl(
V8_EXPORT_PRIVATE void LowerToMachineSchedule(
JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
PoisoningMitigationLevel poison_level, JSHeapBroker* broker);
JSHeapBroker* broker);
} // namespace compiler
} // namespace internal

View File

@ -829,46 +829,36 @@ Node* GraphAssembler::BitcastMaybeObjectToWord(Node* value) {
effect(), control()));
}
Node* GraphAssembler::Word32PoisonOnSpeculation(Node* value) {
return AddNode(graph()->NewNode(machine()->Word32PoisonOnSpeculation(), value,
effect(), control()));
}
Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason,
FeedbackSource const& feedback,
Node* condition, Node* frame_state,
IsSafetyCheck is_safety_check) {
return AddNode(
graph()->NewNode(common()->DeoptimizeIf(DeoptimizeKind::kEager, reason,
feedback, is_safety_check),
condition, frame_state, effect(), control()));
Node* condition, Node* frame_state) {
return AddNode(graph()->NewNode(
common()->DeoptimizeIf(DeoptimizeKind::kEager, reason, feedback),
condition, frame_state, effect(), control()));
}
Node* GraphAssembler::DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback,
Node* condition, Node* frame_state,
IsSafetyCheck is_safety_check) {
return AddNode(graph()->NewNode(
common()->DeoptimizeIf(kind, reason, feedback, is_safety_check),
condition, frame_state, effect(), control()));
Node* condition, Node* frame_state) {
return AddNode(
graph()->NewNode(common()->DeoptimizeIf(kind, reason, feedback),
condition, frame_state, effect(), control()));
}
Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeKind kind,
DeoptimizeReason reason,
FeedbackSource const& feedback,
Node* condition, Node* frame_state,
IsSafetyCheck is_safety_check) {
return AddNode(graph()->NewNode(
common()->DeoptimizeUnless(kind, reason, feedback, is_safety_check),
condition, frame_state, effect(), control()));
Node* condition, Node* frame_state) {
return AddNode(
graph()->NewNode(common()->DeoptimizeUnless(kind, reason, feedback),
condition, frame_state, effect(), control()));
}
Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeReason reason,
FeedbackSource const& feedback,
Node* condition, Node* frame_state,
IsSafetyCheck is_safety_check) {
Node* condition, Node* frame_state) {
return DeoptimizeIfNot(DeoptimizeKind::kEager, reason, feedback, condition,
frame_state, is_safety_check);
frame_state);
}
Node* GraphAssembler::DynamicCheckMapsWithDeoptUnless(Node* condition,
@ -924,8 +914,7 @@ void GraphAssembler::BranchWithCriticalSafetyCheck(
hint = if_false->IsDeferred() ? BranchHint::kTrue : BranchHint::kFalse;
}
BranchImpl(condition, if_true, if_false, hint,
IsSafetyCheck::kCriticalSafetyCheck);
BranchImpl(condition, if_true, if_false, hint);
}
void GraphAssembler::RecordBranchInBlockUpdater(Node* branch,

View File

@ -330,24 +330,16 @@ class V8_EXPORT_PRIVATE GraphAssembler {
Node* Retain(Node* buffer);
Node* UnsafePointerAdd(Node* base, Node* external);
Node* Word32PoisonOnSpeculation(Node* value);
Node* DeoptimizeIf(
DeoptimizeReason reason, FeedbackSource const& feedback, Node* condition,
Node* frame_state,
IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
Node* DeoptimizeIf(
DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback, Node* condition, Node* frame_state,
IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
Node* DeoptimizeIfNot(
DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback, Node* condition, Node* frame_state,
IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
Node* DeoptimizeIfNot(
DeoptimizeReason reason, FeedbackSource const& feedback, Node* condition,
Node* frame_state,
IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
Node* DeoptimizeIf(DeoptimizeReason reason, FeedbackSource const& feedback,
Node* condition, Node* frame_state);
Node* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback, Node* condition,
Node* frame_state);
Node* DeoptimizeIfNot(DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback, Node* condition,
Node* frame_state);
Node* DeoptimizeIfNot(DeoptimizeReason reason, FeedbackSource const& feedback,
Node* condition, Node* frame_state);
Node* DynamicCheckMapsWithDeoptUnless(Node* condition, Node* slot_index,
Node* map, Node* handler,
Node* feedback_vector,
@ -557,7 +549,7 @@ class V8_EXPORT_PRIVATE GraphAssembler {
void BranchImpl(Node* condition,
GraphAssemblerLabel<sizeof...(Vars)>* if_true,
GraphAssemblerLabel<sizeof...(Vars)>* if_false,
BranchHint hint, IsSafetyCheck is_safety_check, Vars...);
BranchHint hint, Vars...);
void RecordBranchInBlockUpdater(Node* branch, Node* if_true_control,
Node* if_false_control,
BasicBlock* if_true_block,
@ -742,8 +734,7 @@ void GraphAssembler::Branch(Node* condition,
hint = if_false->IsDeferred() ? BranchHint::kTrue : BranchHint::kFalse;
}
BranchImpl(condition, if_true, if_false, hint, IsSafetyCheck::kNoSafetyCheck,
vars...);
BranchImpl(condition, if_true, if_false, hint, vars...);
}
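
Aside: Branch/BranchImpl now only thread the BranchHint through; the hint itself is still inferred from deferred (cold) labels when the caller passes BranchHint::kNone. A standalone sketch of that inference, with the guard condition paraphrased (the exact check sits outside the visible hunk):

#include <cstdio>

enum class BranchHint { kNone, kTrue, kFalse };

struct Label {
  bool deferred = false;
  bool IsDeferred() const { return deferred; }
};

BranchHint InferHint(BranchHint hint, const Label& if_true,
                     const Label& if_false) {
  // Paraphrased guard: only infer when the caller gave no hint and exactly
  // one of the two targets is marked deferred.
  if (hint == BranchHint::kNone &&
      if_true.IsDeferred() != if_false.IsDeferred()) {
    hint = if_false.IsDeferred() ? BranchHint::kTrue : BranchHint::kFalse;
  }
  return hint;
}

int main() {
  Label hot, cold;
  cold.deferred = true;
  // The false target is cold, so the branch is predicted taken (kTrue == 1).
  std::printf("%d\n", static_cast<int>(InferHint(BranchHint::kNone, hot, cold)));
}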
template <typename... Vars>
@ -751,20 +742,17 @@ void GraphAssembler::BranchWithHint(
Node* condition, GraphAssemblerLabel<sizeof...(Vars)>* if_true,
GraphAssemblerLabel<sizeof...(Vars)>* if_false, BranchHint hint,
Vars... vars) {
BranchImpl(condition, if_true, if_false, hint, IsSafetyCheck::kNoSafetyCheck,
vars...);
BranchImpl(condition, if_true, if_false, hint, vars...);
}
template <typename... Vars>
void GraphAssembler::BranchImpl(Node* condition,
GraphAssemblerLabel<sizeof...(Vars)>* if_true,
GraphAssemblerLabel<sizeof...(Vars)>* if_false,
BranchHint hint, IsSafetyCheck is_safety_check,
Vars... vars) {
BranchHint hint, Vars... vars) {
DCHECK_NOT_NULL(control());
Node* branch = graph()->NewNode(common()->Branch(hint, is_safety_check),
condition, control());
Node* branch = graph()->NewNode(common()->Branch(hint), condition, control());
Node* if_true_control = control_ =
graph()->NewNode(common()->IfTrue(), branch);

View File

@ -728,8 +728,7 @@ class IteratingArrayBuiltinReducerAssembler : public JSCallReducerAssembler {
TNode<HeapObject> elements =
LoadField<HeapObject>(AccessBuilder::ForJSObjectElements(), o);
TNode<Object> value = LoadElement<Object>(
AccessBuilder::ForFixedArrayElement(kind, LoadSensitivity::kCritical),
elements, index);
AccessBuilder::ForFixedArrayElement(kind), elements, index);
return std::make_pair(index, value);
}
@ -6374,9 +6373,8 @@ Reduction JSCallReducer::ReduceStringPrototypeStringAt(
index, receiver_length, effect, control);
// Return the character from the {receiver} as single character string.
Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
Node* value = effect = graph()->NewNode(string_access_operator, receiver,
masked_index, effect, control);
index, effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
@ -6434,11 +6432,9 @@ Reduction JSCallReducer::ReduceStringPrototypeStartsWith(Node* node) {
Node* etrue = effect;
Node* vtrue;
{
Node* masked_position = graph()->NewNode(
simplified()->PoisonIndex(), unsigned_position);
Node* string_first = etrue =
graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
masked_position, etrue, if_true);
unsigned_position, etrue, if_true);
Node* search_first =
jsgraph()->Constant(str.GetFirstChar().value());
@ -6489,10 +6485,8 @@ Reduction JSCallReducer::ReduceStringPrototypeCharAt(Node* node) {
index, receiver_length, effect, control);
// Return the character from the {receiver} as single character string.
Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
Node* value = effect =
graph()->NewNode(simplified()->StringCharCodeAt(), receiver, masked_index,
effect, control);
Node* value = effect = graph()->NewNode(simplified()->StringCharCodeAt(),
receiver, index, effect, control);
value = graph()->NewNode(simplified()->StringFromSingleCharCode(), value);
ReplaceWithValue(node, value, effect, control);

View File

@ -1721,7 +1721,6 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
Type::Any(),
MachineType::AnyTagged(),
kFullWriteBarrier,
LoadSensitivity::kUnsafe,
const_field_info};
// Note: the use of RawInobjectPropertyAt (vs. the higher-level

View File

@ -2453,7 +2453,6 @@ JSNativeContextSpecialization::BuildPropertyStore(
field_type,
MachineType::TypeForRepresentation(field_representation),
kFullWriteBarrier,
LoadSensitivity::kUnsafe,
access_info.GetConstFieldInfo(),
access_mode == AccessMode::kStoreInLiteral};
@ -2487,7 +2486,6 @@ JSNativeContextSpecialization::BuildPropertyStore(
Type::OtherInternal(),
MachineType::TaggedPointer(),
kPointerWriteBarrier,
LoadSensitivity::kUnsafe,
access_info.GetConstFieldInfo(),
access_mode == AccessMode::kStoreInLiteral};
storage = effect =
@ -2793,10 +2791,8 @@ JSNativeContextSpecialization::BuildElementAccess(
if (situation == kHandleOOB_SmiCheckDone) {
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
Node* branch = graph()->NewNode(
common()->Branch(BranchHint::kTrue,
IsSafetyCheck::kCriticalSafetyCheck),
check, control);
Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = effect;
@ -2984,10 +2980,9 @@ JSNativeContextSpecialization::BuildElementAccess(
element_type = Type::SignedSmall();
element_machine_type = MachineType::TaggedSigned();
}
ElementAccess element_access = {
kTaggedBase, FixedArray::kHeaderSize,
element_type, element_machine_type,
kFullWriteBarrier, LoadSensitivity::kCritical};
ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
element_type, element_machine_type,
kFullWriteBarrier};
// Access the actual element.
if (keyed_mode.access_mode() == AccessMode::kLoad) {
@ -3007,10 +3002,8 @@ JSNativeContextSpecialization::BuildElementAccess(
CanTreatHoleAsUndefined(receiver_maps)) {
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
Node* branch = graph()->NewNode(
common()->Branch(BranchHint::kTrue,
IsSafetyCheck::kCriticalSafetyCheck),
check, control);
Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = effect;
@ -3293,9 +3286,7 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
Node* branch =
graph()->NewNode(common()->Branch(BranchHint::kTrue,
IsSafetyCheck::kCriticalSafetyCheck),
check, *control);
graph()->NewNode(common()->Branch(BranchHint::kTrue), check, *control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
// Do a real bounds check against {length}. This is in order to protect
@ -3306,10 +3297,8 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
CheckBoundsFlag::kConvertStringAndMinusZero |
CheckBoundsFlag::kAbortOnOutOfBounds),
index, length, *effect, if_true);
Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
Node* vtrue = etrue =
graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
masked_index, etrue, if_true);
Node* vtrue = etrue = graph()->NewNode(simplified()->StringCharCodeAt(),
receiver, index, etrue, if_true);
vtrue = graph()->NewNode(simplified()->StringFromSingleCharCode(), vtrue);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
@ -3327,12 +3316,9 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
CheckBoundsFlag::kConvertStringAndMinusZero),
index, length, *effect, *control);
Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
// Return the character from the {receiver} as single character string.
Node* value = *effect =
graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
masked_index, *effect, *control);
Node* value = *effect = graph()->NewNode(
simplified()->StringCharCodeAt(), receiver, index, *effect, *control);
value = graph()->NewNode(simplified()->StringFromSingleCharCode(), value);
return value;
}

View File

@ -214,15 +214,13 @@ class V8_EXPORT_PRIVATE CallDescriptor final
kInitializeRootRegister = 1u << 3,
// Does not ever try to allocate space on our heap.
kNoAllocate = 1u << 4,
// Use retpoline for this call if indirect.
kRetpoline = 1u << 5,
// Use the kJavaScriptCallCodeStartRegister (fixed) register for the
// indirect target address when calling.
kFixedTargetRegister = 1u << 6,
kCallerSavedRegisters = 1u << 7,
kFixedTargetRegister = 1u << 5,
kCallerSavedRegisters = 1u << 6,
// The kCallerSavedFPRegisters only matters (and set) when the more general
// flag for kCallerSavedRegisters above is also set.
kCallerSavedFPRegisters = 1u << 8,
kCallerSavedFPRegisters = 1u << 7,
// Tail calls for tier up are special (in fact they are different enough
// from normal tail calls to warrant a dedicated opcode; but they also have
// enough similar aspects that reusing the TailCall opcode is pragmatic).
@ -238,15 +236,15 @@ class V8_EXPORT_PRIVATE CallDescriptor final
//
// In other words, behavior is identical to a jmp instruction prior caller
// frame construction.
kIsTailCallForTierUp = 1u << 9,
kIsTailCallForTierUp = 1u << 8,
// AIX has a function descriptor by default but it can be disabled for a
// certain CFunction call (only used for Kind::kCallAddress).
kNoFunctionDescriptor = 1u << 9,
// Flags past here are *not* encoded in InstructionCode and are thus not
// accessible from the code generator. See also
// kFlagsBitsEncodedInInstructionCode.
// AIX has a function descriptor by default but it can be disabled for a
// certain CFunction call (only used for Kind::kCallAddress).
kNoFunctionDescriptor = 1u << 10,
};
using Flags = base::Flags<Flag>;
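
Aside: because CallDescriptor flags are single bits partly encoded into InstructionCode, deleting kRetpoline (previously 1u << 5) lets every higher flag slide down one position. A standalone sketch checking that the repacked values shown above stay distinct; the lower flag bits (1u << 0 through 1u << 2) are elided since they are outside the hunk:

#include <cassert>
#include <cstdint>
#include <cstdio>

enum Flag : uint32_t {
  // Bits 0..2 elided (unchanged, outside the hunk above).
  kInitializeRootRegister = 1u << 3,
  kNoAllocate = 1u << 4,
  // kRetpoline used to sit at 1u << 5; everything below moved down one bit.
  kFixedTargetRegister = 1u << 5,
  kCallerSavedRegisters = 1u << 6,
  kCallerSavedFPRegisters = 1u << 7,
  kIsTailCallForTierUp = 1u << 8,
  kNoFunctionDescriptor = 1u << 9,
};

int main() {
  const uint32_t flags[] = {kInitializeRootRegister, kNoAllocate,
                            kFixedTargetRegister,    kCallerSavedRegisters,
                            kCallerSavedFPRegisters, kIsTailCallForTierUp,
                            kNoFunctionDescriptor};
  uint32_t seen = 0;
  for (uint32_t f : flags) {
    assert((seen & f) == 0);  // each flag occupies its own bit
    seen |= f;
  }
  std::printf("packed bits: 0x%x\n", seen);  // 0x3f8: bits 3..9, no gaps
  return 0;
}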

View File

@ -124,7 +124,6 @@ class MachineRepresentationInferrer {
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
case IrOpcode::kProtectedLoad:
case IrOpcode::kPoisonedLoad:
representation_vector_[node->id()] = PromoteRepresentation(
LoadRepresentationOf(node->op()).representation());
break;
@ -206,15 +205,8 @@ class MachineRepresentationInferrer {
case IrOpcode::kChangeInt32ToTagged:
case IrOpcode::kChangeUint32ToTagged:
case IrOpcode::kBitcastWordToTagged:
case IrOpcode::kTaggedPoisonOnSpeculation:
representation_vector_[node->id()] = MachineRepresentation::kTagged;
break;
case IrOpcode::kWord32PoisonOnSpeculation:
representation_vector_[node->id()] = MachineRepresentation::kWord32;
break;
case IrOpcode::kWord64PoisonOnSpeculation:
representation_vector_[node->id()] = MachineRepresentation::kWord64;
break;
case IrOpcode::kCompressedHeapConstant:
representation_vector_[node->id()] =
MachineRepresentation::kCompressedPointer;
@ -394,14 +386,6 @@ class MachineRepresentationChecker {
CheckValueInputRepresentationIs(
node, 0, MachineType::PointerRepresentation());
break;
case IrOpcode::kWord32PoisonOnSpeculation:
CheckValueInputRepresentationIs(node, 0,
MachineRepresentation::kWord32);
break;
case IrOpcode::kWord64PoisonOnSpeculation:
CheckValueInputRepresentationIs(node, 0,
MachineRepresentation::kWord64);
break;
case IrOpcode::kBitcastTaggedToWord:
case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits:
if (COMPRESS_POINTERS_BOOL) {
@ -410,9 +394,6 @@ class MachineRepresentationChecker {
CheckValueInputIsTagged(node, 0);
}
break;
case IrOpcode::kTaggedPoisonOnSpeculation:
CheckValueInputIsTagged(node, 0);
break;
case IrOpcode::kTruncateFloat64ToWord32:
case IrOpcode::kTruncateFloat64ToUint32:
case IrOpcode::kTruncateFloat64ToFloat32:
@ -566,7 +547,6 @@ class MachineRepresentationChecker {
case IrOpcode::kWord32AtomicLoad:
case IrOpcode::kWord32AtomicPairLoad:
case IrOpcode::kWord64AtomicLoad:
case IrOpcode::kPoisonedLoad:
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());

View File

@ -124,7 +124,6 @@ LoadRepresentation LoadRepresentationOf(Operator const* op) {
IrOpcode::kWord32AtomicLoad == op->opcode() ||
IrOpcode::kWord64AtomicLoad == op->opcode() ||
IrOpcode::kWord32AtomicPairLoad == op->opcode() ||
IrOpcode::kPoisonedLoad == op->opcode() ||
IrOpcode::kUnalignedLoad == op->opcode() ||
IrOpcode::kLoadImmutable == op->opcode());
return OpParameter<LoadRepresentation>(op);
@ -831,13 +830,6 @@ struct MachineOperatorGlobalCache {
Operator::kEliminatable, "Load", 2, 1, \
1, 1, 1, 0, MachineType::Type()) {} \
}; \
struct PoisonedLoad##Type##Operator final \
: public Operator1<LoadRepresentation> { \
PoisonedLoad##Type##Operator() \
: Operator1<LoadRepresentation>( \
IrOpcode::kPoisonedLoad, Operator::kEliminatable, \
"PoisonedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
struct UnalignedLoad##Type##Operator final \
: public Operator1<LoadRepresentation> { \
UnalignedLoad##Type##Operator() \
@ -861,7 +853,6 @@ struct MachineOperatorGlobalCache {
0, 0, 1, 0, 0, MachineType::Type()) {} \
}; \
Load##Type##Operator kLoad##Type; \
PoisonedLoad##Type##Operator kPoisonedLoad##Type; \
UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
ProtectedLoad##Type##Operator kProtectedLoad##Type; \
LoadImmutable##Type##Operator kLoadImmutable##Type;
@ -1157,30 +1148,6 @@ struct MachineOperatorGlobalCache {
};
BitcastMaybeObjectToWordOperator kBitcastMaybeObjectToWord;
struct TaggedPoisonOnSpeculation : public Operator {
TaggedPoisonOnSpeculation()
: Operator(IrOpcode::kTaggedPoisonOnSpeculation,
Operator::kEliminatable | Operator::kNoWrite,
"TaggedPoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
};
TaggedPoisonOnSpeculation kTaggedPoisonOnSpeculation;
struct Word32PoisonOnSpeculation : public Operator {
Word32PoisonOnSpeculation()
: Operator(IrOpcode::kWord32PoisonOnSpeculation,
Operator::kEliminatable | Operator::kNoWrite,
"Word32PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
};
Word32PoisonOnSpeculation kWord32PoisonOnSpeculation;
struct Word64PoisonOnSpeculation : public Operator {
Word64PoisonOnSpeculation()
: Operator(IrOpcode::kWord64PoisonOnSpeculation,
Operator::kEliminatable | Operator::kNoWrite,
"Word64PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
};
Word64PoisonOnSpeculation kWord64PoisonOnSpeculation;
struct AbortCSAAssertOperator : public Operator {
AbortCSAAssertOperator()
: Operator(IrOpcode::kAbortCSAAssert, Operator::kNoThrow,
@ -1366,16 +1333,6 @@ const Operator* MachineOperatorBuilder::LoadImmutable(LoadRepresentation rep) {
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::PoisonedLoad(LoadRepresentation rep) {
#define LOAD(Type) \
if (rep == MachineType::Type()) { \
return &cache_.kPoisonedLoad##Type; \
}
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
UNREACHABLE();
}
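
Aside: the load operators here are stamped out by running a LOAD(Type) macro over MACHINE_TYPE_LIST, so removing the PoisonedLoad entry from the stamping macro and its lookup deletes the whole per-type operator family at once. A standalone miniature of that X-macro pattern, using a hypothetical two-entry type list:

#include <cstdio>
#include <cstring>

// Hypothetical, shortened type list standing in for MACHINE_TYPE_LIST.
#define TYPE_LIST(V) V(Int32) V(Float64)

struct Operator {
  const char* name;
  const char* type;
};

struct OperatorCache {
#define DEFINE_LOAD(Type) Operator kLoad##Type{"Load", #Type};
  TYPE_LIST(DEFINE_LOAD)
#undef DEFINE_LOAD
};

static OperatorCache cache;

const Operator* Load(const char* type) {
#define LOOKUP(Type) \
  if (std::strcmp(type, #Type) == 0) return &cache.kLoad##Type;
  TYPE_LIST(LOOKUP)
#undef LOOKUP
  return nullptr;  // the real builder hits UNREACHABLE() here
}

int main() {
  const Operator* op = Load("Float64");
  std::printf("%s %s\n", op->name, op->type);  // prints "Load Float64"
}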
const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
#define LOAD(Type) \
if (rep == MachineType::Type()) { \
@ -1813,18 +1770,6 @@ const Operator* MachineOperatorBuilder::Word32AtomicPairCompareExchange() {
return &cache_.kWord32AtomicPairCompareExchange;
}
const Operator* MachineOperatorBuilder::TaggedPoisonOnSpeculation() {
return &cache_.kTaggedPoisonOnSpeculation;
}
const Operator* MachineOperatorBuilder::Word32PoisonOnSpeculation() {
return &cache_.kWord32PoisonOnSpeculation;
}
const Operator* MachineOperatorBuilder::Word64PoisonOnSpeculation() {
return &cache_.kWord64PoisonOnSpeculation;
}
#define EXTRACT_LANE_OP(Type, Sign, lane_count) \
const Operator* MachineOperatorBuilder::Type##ExtractLane##Sign( \
int32_t lane_index) { \

View File

@ -852,7 +852,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// load [base + index]
const Operator* Load(LoadRepresentation rep);
const Operator* LoadImmutable(LoadRepresentation rep);
const Operator* PoisonedLoad(LoadRepresentation rep);
const Operator* ProtectedLoad(LoadRepresentation rep);
const Operator* LoadTransform(MemoryAccessKind kind,
@ -879,11 +878,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* StackSlot(int size, int alignment = 0);
const Operator* StackSlot(MachineRepresentation rep, int alignment = 0);
// Destroy value by masking when misspeculating.
const Operator* TaggedPoisonOnSpeculation();
const Operator* Word32PoisonOnSpeculation();
const Operator* Word64PoisonOnSpeculation();
// Access to the machine stack.
const Operator* LoadFramePointer();
const Operator* LoadParentFramePointer();
@ -980,7 +974,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
V(Word, Ror) \
V(Word, Clz) \
V(Word, Equal) \
V(Word, PoisonOnSpeculation) \
V(Int, Add) \
V(Int, Sub) \
V(Int, Mul) \

View File

@ -60,7 +60,6 @@ class MemoryLowering::AllocationGroup final : public ZoneObject {
MemoryLowering::MemoryLowering(JSGraph* jsgraph, Zone* zone,
JSGraphAssembler* graph_assembler,
PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding,
WriteBarrierAssertFailedCallback callback,
const char* function_debug_name)
@ -71,7 +70,6 @@ MemoryLowering::MemoryLowering(JSGraph* jsgraph, Zone* zone,
machine_(jsgraph->machine()),
graph_assembler_(graph_assembler),
allocation_folding_(allocation_folding),
poisoning_level_(poisoning_level),
write_barrier_assert_failed_(callback),
function_debug_name_(function_debug_name) {}
@ -401,11 +399,7 @@ Reduction MemoryLowering::ReduceLoadElement(Node* node) {
node->ReplaceInput(1, ComputeIndex(access, index));
MachineType type = access.machine_type;
DCHECK(!type.IsMapWord());
if (NeedsPoisoning(access.load_sensitivity)) {
NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
} else {
NodeProperties::ChangeOp(node, machine()->Load(type));
}
NodeProperties::ChangeOp(node, machine()->Load(type));
return Changed(node);
}
@ -413,8 +407,7 @@ Node* MemoryLowering::DecodeExternalPointer(
Node* node, ExternalPointerTag external_pointer_tag) {
#ifdef V8_HEAP_SANDBOX
DCHECK(V8_HEAP_SANDBOX_BOOL);
DCHECK(node->opcode() == IrOpcode::kLoad ||
node->opcode() == IrOpcode::kPoisonedLoad);
DCHECK(node->opcode() == IrOpcode::kLoad);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
__ InitializeEffectControl(effect, control);
@ -476,16 +469,11 @@ Reduction MemoryLowering::ReduceLoadField(Node* node) {
}
if (type.IsMapWord()) {
DCHECK(!NeedsPoisoning(access.load_sensitivity));
DCHECK(!access.type.Is(Type::SandboxedExternalPointer()));
return ReduceLoadMap(node);
}
if (NeedsPoisoning(access.load_sensitivity)) {
NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
} else {
NodeProperties::ChangeOp(node, machine()->Load(type));
}
NodeProperties::ChangeOp(node, machine()->Load(type));
if (V8_HEAP_SANDBOX_BOOL &&
access.type.Is(Type::SandboxedExternalPointer())) {
@ -655,21 +643,6 @@ WriteBarrierKind MemoryLowering::ComputeWriteBarrierKind(
return write_barrier_kind;
}
bool MemoryLowering::NeedsPoisoning(LoadSensitivity load_sensitivity) const {
// Safe loads do not need poisoning.
if (load_sensitivity == LoadSensitivity::kSafe) return false;
switch (poisoning_level_) {
case PoisoningMitigationLevel::kDontPoison:
return false;
case PoisoningMitigationLevel::kPoisonAll:
return true;
case PoisoningMitigationLevel::kPoisonCriticalOnly:
return load_sensitivity == LoadSensitivity::kCritical;
}
UNREACHABLE();
}
MemoryLowering::AllocationGroup::AllocationGroup(Node* node,
AllocationType allocation,
Zone* zone)

View File

@ -75,7 +75,6 @@ class MemoryLowering final : public Reducer {
MemoryLowering(
JSGraph* jsgraph, Zone* zone, JSGraphAssembler* graph_assembler,
PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding =
AllocationFolding::kDontAllocationFolding,
WriteBarrierAssertFailedCallback callback = [](Node*, Node*, const char*,
@ -112,7 +111,6 @@ class MemoryLowering final : public Reducer {
Node* DecodeExternalPointer(Node* encoded_pointer, ExternalPointerTag tag);
Reduction ReduceLoadMap(Node* encoded_pointer);
Node* ComputeIndex(ElementAccess const& access, Node* node);
bool NeedsPoisoning(LoadSensitivity load_sensitivity) const;
void EnsureAllocateOperator();
Node* GetWasmInstanceNode();
@ -133,7 +131,6 @@ class MemoryLowering final : public Reducer {
MachineOperatorBuilder* machine_;
JSGraphAssembler* graph_assembler_;
AllocationFolding allocation_folding_;
PoisoningMitigationLevel poisoning_level_;
WriteBarrierAssertFailedCallback write_barrier_assert_failed_;
const char* function_debug_name_;

View File

@ -40,7 +40,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kLoadLane:
case IrOpcode::kLoadTransform:
case IrOpcode::kMemoryBarrier:
case IrOpcode::kPoisonedLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kProtectedStore:
case IrOpcode::kRetain:
@ -54,7 +53,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kStoreField:
case IrOpcode::kStoreLane:
case IrOpcode::kStoreToObject:
case IrOpcode::kTaggedPoisonOnSpeculation:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
case IrOpcode::kUnreachable:
@ -77,7 +75,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kWord32AtomicStore:
case IrOpcode::kWord32AtomicSub:
case IrOpcode::kWord32AtomicXor:
case IrOpcode::kWord32PoisonOnSpeculation:
case IrOpcode::kWord64AtomicAdd:
case IrOpcode::kWord64AtomicAnd:
case IrOpcode::kWord64AtomicCompareExchange:
@ -87,7 +84,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kWord64AtomicStore:
case IrOpcode::kWord64AtomicSub:
case IrOpcode::kWord64AtomicXor:
case IrOpcode::kWord64PoisonOnSpeculation:
return false;
case IrOpcode::kCall:
@ -183,13 +179,12 @@ void WriteBarrierAssertFailed(Node* node, Node* object, const char* name,
} // namespace
MemoryOptimizer::MemoryOptimizer(
JSGraph* jsgraph, Zone* zone, PoisoningMitigationLevel poisoning_level,
JSGraph* jsgraph, Zone* zone,
MemoryLowering::AllocationFolding allocation_folding,
const char* function_debug_name, TickCounter* tick_counter)
: graph_assembler_(jsgraph, zone),
memory_lowering_(jsgraph, zone, &graph_assembler_, poisoning_level,
allocation_folding, WriteBarrierAssertFailed,
function_debug_name),
memory_lowering_(jsgraph, zone, &graph_assembler_, allocation_folding,
WriteBarrierAssertFailed, function_debug_name),
jsgraph_(jsgraph),
empty_state_(AllocationState::Empty(zone)),
pending_(zone),

View File

@ -30,7 +30,6 @@ using NodeId = uint32_t;
class MemoryOptimizer final {
public:
MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
PoisoningMitigationLevel poisoning_level,
MemoryLowering::AllocationFolding allocation_folding,
const char* function_debug_name, TickCounter* tick_counter);
~MemoryOptimizer() = default;

Some files were not shown because too many files have changed in this diff.