[wasm-gc] Fix call feedback vector issues after memory out of bounds accesses

Turbofan reuses the feedback vectors that Liftoff creates during
compilation. It is assumed that for any given function, Liftoff and
Turbofan use feedback vectors of the same size.

Calls in unreachable code don't allocate entries in the feedback vector,
so Turbofan and Liftoff must agree on which parts of the code are
treated as unreachable. This is achieved by moving the handling of
statically out-of-bounds accesses from Liftoff into the decoder, which
is shared with the Turbofan compilation.
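
For illustration, the problematic pattern looks like this (a minimal
sketch using the wasm-module-builder helpers from the regression test
below; a memory with maximum size 0 makes the store statically out of
bounds):

  d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
  const builder = new WasmModuleBuilder();
  builder.addMemory(0, 0);  // maximum size 0: the store is statically OOB
  const callee = builder.addFunction('callee', makeSig([], [])).addBody([]);
  builder.addFunction('main', makeSig([], [])).exportFunc().addBody([
    kExprCallFunction, callee.index,  // call #1: gets a feedback vector slot
    kExprI32Const, 0,                 // store index
    kExprI32Const, 0,                 // store value
    kExprI32StoreMem, 1, 0x0f,        // statically out of bounds, always traps
    kExprCallFunction, callee.index,  // call #2: never actually executed
  ]);
  builder.instantiate();

Previously, Liftoff detected the static out-of-bounds access itself,
marked the succeeding code as unreachable and skipped the feedback slot
for call #2, while Turbofan did not, so the two tiers disagreed about
the size of the feedback vector.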

Bug: chromium:1403398
Change-Id: I113726c1a0d773ea9483c80d8e3c3084be423ca2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4152477
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Commit-Queue: Matthias Liedtke <mliedtke@chromium.org>
Cr-Commit-Position: refs/heads/main@{#85248}
Author: Matthias Liedtke, 2023-01-12 11:55:53 +01:00 (committed by V8 LUCI CQ)
parent 25005c142c
commit 89677cfaa8
8 changed files with 191 additions and 69 deletions


@@ -3478,9 +3478,9 @@ WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
   // machine.
   if (offset > std::numeric_limits<uintptr_t>::max() ||
       !base::IsInBounds<uintptr_t>(offset, access_size,
-                                   env_->max_memory_size)) {
+                                   env_->module->max_memory_size)) {
     // The access will be out of bounds, even for the largest memory.
-    TrapIfEq32(wasm::kTrapMemOutOfBounds, Int32Constant(0), 0, position);
+    Trap(wasm::TrapReason::kTrapMemOutOfBounds, position);
     return {gasm_->UintPtrConstant(0), kOutOfBounds};
   }
@@ -3516,8 +3516,8 @@ WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
   uintptr_t end_offset = offset + access_size - 1u;
   UintPtrMatcher match(index);
-  if (match.HasResolvedValue() && end_offset <= env_->min_memory_size &&
-      match.ResolvedValue() < env_->min_memory_size - end_offset) {
+  if (match.HasResolvedValue() && end_offset <= env_->module->min_memory_size &&
+      match.ResolvedValue() < env_->module->min_memory_size - end_offset) {
     // The input index is a constant and everything is statically within
     // bounds of the smallest possible memory.
     return {index, kInBounds};
@@ -3530,7 +3530,7 @@ WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
   Node* mem_size = instance_cache_->mem_size;
   Node* end_offset_node = mcgraph_->UintPtrConstant(end_offset);
-  if (end_offset > env_->min_memory_size) {
+  if (end_offset > env_->module->min_memory_size) {
     // The end offset is larger than the smallest memory.
     // Dynamically check the end offset against the dynamic memory size.
     Node* cond = gasm_->UintLessThan(end_offset_node, mem_size);
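
(Note: for a statically out-of-bounds access, the graph builder now emits an
unconditional Trap instead of TrapIfEq32 comparing two equal constants, i.e. a
conditional trap that always fired.)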


@@ -2962,9 +2962,9 @@ class LiftoffCompiler {
   Register BoundsCheckMem(FullDecoder* decoder, uint32_t access_size,
                           uint64_t offset, LiftoffRegister index,
                           LiftoffRegList pinned, ForceCheck force_check) {
-    const bool statically_oob =
-        !base::IsInBounds<uintptr_t>(offset, access_size,
-                                     env_->max_memory_size);
+    // This is ensured by the decoder.
+    DCHECK(base::IsInBounds<uintptr_t>(offset, access_size,
+                                       env_->module->max_memory_size));
     // After bounds checking, we know that the index must be ptrsize, hence only
     // look at the lower word on 32-bit systems (the high word is bounds-checked
@@ -2980,8 +2980,7 @@ class LiftoffCompiler {
     // Early return for trap handler.
     DCHECK_IMPLIES(env_->module->is_memory64,
                    env_->bounds_checks == kExplicitBoundsChecks);
-    if (!force_check && !statically_oob &&
-        env_->bounds_checks == kTrapHandler) {
+    if (!force_check && env_->bounds_checks == kTrapHandler) {
       // With trap handlers we should not have a register pair as input (we
       // would only return the lower half).
       DCHECK(index.is_gp());
@@ -2995,18 +2994,12 @@ class LiftoffCompiler {
     Label* trap_label =
         AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds, 0);
-    if (V8_UNLIKELY(statically_oob)) {
-      __ emit_jump(trap_label);
-      decoder->SetSucceedingCodeDynamicallyUnreachable();
-      return no_reg;
-    }
     // Convert the index to ptrsize, bounds-checking the high word on 32-bit
     // systems for memory64.
     if (!env_->module->is_memory64) {
       __ emit_u32_to_uintptr(index_ptrsize, index_ptrsize);
     } else if (kSystemPointerSize == kInt32Size) {
-      DCHECK_GE(kMaxUInt32, env_->max_memory_size);
+      DCHECK_GE(kMaxUInt32, env_->module->max_memory_size);
       FREEZE_STATE(trapping);
       __ emit_cond_jump(kNotEqualZero, trap_label, kI32, index.high_gp(),
                         no_reg, trapping);
@@ -3026,7 +3019,7 @@ class LiftoffCompiler {
     // If the end offset is larger than the smallest memory, dynamically check
     // the end offset against the actual memory size, which is not known at
     // compile time. Otherwise, only one check is required (see below).
-    if (end_offset > env_->min_memory_size) {
+    if (end_offset > env_->module->min_memory_size) {
       __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kIntPtrKind,
                         end_offset_reg.gp(), mem_size.gp(), trapping);
     }
@@ -3139,7 +3132,7 @@ class LiftoffCompiler {
     if (effective_offset < index  // overflow
         || !base::IsInBounds<uintptr_t>(effective_offset, access_size,
-                                        env_->min_memory_size)) {
+                                        env_->module->min_memory_size)) {
       return false;
     }
@@ -3191,7 +3184,6 @@ class LiftoffCompiler {
     LiftoffRegister full_index = __ PopToRegister();
     index = BoundsCheckMem(decoder, type.size(), offset, full_index, {},
                            kDontForceCheck);
-    if (index == no_reg) return;
     SCOPED_CODE_COMMENT("load from memory");
     LiftoffRegList pinned{index};
@@ -3235,7 +3227,6 @@ class LiftoffCompiler {
         transform == LoadTransformationKind::kExtend ? 8 : type.size();
     Register index = BoundsCheckMem(decoder, access_size, imm.offset,
                                     full_index, {}, kDontForceCheck);
-    if (index == no_reg) return;
     uintptr_t offset = imm.offset;
     LiftoffRegList pinned{index};
@@ -3274,7 +3265,6 @@ class LiftoffCompiler {
     LiftoffRegister full_index = __ PopToRegister();
     Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
                                     full_index, pinned, kDontForceCheck);
-    if (index == no_reg) return;
     uintptr_t offset = imm.offset;
     pinned.set(index);
@@ -3325,7 +3315,6 @@ class LiftoffCompiler {
     LiftoffRegister full_index = __ PopToRegister(pinned);
     index = BoundsCheckMem(decoder, type.size(), imm.offset, full_index,
                            pinned, kDontForceCheck);
-    if (index == no_reg) return;
     pinned.set(index);
     SCOPED_CODE_COMMENT("store to memory");
@@ -3358,7 +3347,6 @@ class LiftoffCompiler {
     LiftoffRegister full_index = __ PopToRegister(pinned);
     Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
                                     full_index, pinned, kDontForceCheck);
-    if (index == no_reg) return;
     uintptr_t offset = imm.offset;
     pinned.set(index);
@@ -4756,7 +4744,6 @@ class LiftoffCompiler {
     LiftoffRegister full_index = __ PopToRegister(pinned);
     Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
                                     full_index, pinned, kDoForceCheck);
-    if (index == no_reg) return;
     pinned.set(index);
     AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
@@ -4778,7 +4765,6 @@ class LiftoffCompiler {
     LiftoffRegister full_index = __ PopToRegister();
     Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
                                     full_index, {}, kDoForceCheck);
-    if (index == no_reg) return;
     LiftoffRegList pinned{index};
     AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
@@ -4824,7 +4810,6 @@ class LiftoffCompiler {
     LiftoffRegister full_index = __ PopToRegister(pinned);
     Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
                                     full_index, pinned, kDoForceCheck);
-    if (index == no_reg) return;
     pinned.set(index);
     AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
@@ -4848,7 +4833,6 @@ class LiftoffCompiler {
     LiftoffRegister full_index = __ PeekToRegister(2, {});
     Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
                                     full_index, {}, kDoForceCheck);
-    if (index == no_reg) return;
     LiftoffRegList pinned{index};
     AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
@@ -4883,7 +4867,6 @@ class LiftoffCompiler {
     LiftoffRegister full_index = __ PopToRegister(pinned);
     Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
                                     full_index, pinned, kDoForceCheck);
-    if (index == no_reg) return;
     pinned.set(index);
     AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
@@ -4930,7 +4913,6 @@ class LiftoffCompiler {
     Register index_reg =
         BoundsCheckMem(decoder, value_kind_size(kind), imm.offset, full_index,
                        pinned, kDoForceCheck);
-    if (index_reg == no_reg) return;
     pinned.set(index_reg);
     AlignmentCheckMem(decoder, value_kind_size(kind), imm.offset, index_reg,
                       pinned);
@@ -5011,7 +4993,6 @@ class LiftoffCompiler {
     LiftoffRegister full_index = __ PeekToRegister(1, {});
     Register index_reg = BoundsCheckMem(decoder, kInt32Size, imm.offset,
                                         full_index, {}, kDoForceCheck);
-    if (index_reg == no_reg) return;
     LiftoffRegList pinned{index_reg};
     AlignmentCheckMem(decoder, kInt32Size, imm.offset, index_reg, pinned);
@@ -5194,7 +5175,7 @@ class LiftoffCompiler {
     // For memory64 on 32-bit systems, combine all high words for a zero-check
     // and only use the low words afterwards. This keeps the register pressure
     // manageable.
-    DCHECK_GE(kMaxUInt32, env_->max_memory_size);
+    DCHECK_GE(kMaxUInt32, env_->module->max_memory_size);
     pinned->set(reg.low());
     if (*high_word == no_reg) {
       // Choose a register to hold the (combination of) high word(s). It cannot


@@ -64,14 +64,6 @@ struct CompilationEnv {
   // be generated differently.
   const RuntimeExceptionSupport runtime_exception_support;
-  // The smallest size of any memory that could be used with this module, in
-  // bytes.
-  const uintptr_t min_memory_size;
-  // The largest size of any memory that could be used with this module, in
-  // bytes.
-  const uintptr_t max_memory_size;
   // Features enabled for this compilation.
   const WasmFeatures enabled_features;
@@ -85,25 +77,8 @@ struct CompilationEnv {
       : module(module),
         bounds_checks(bounds_checks),
         runtime_exception_support(runtime_exception_support),
-        min_memory_size(MinPages(module) * kWasmPageSize),
-        max_memory_size(MaxPages(module) * kWasmPageSize),
         enabled_features(enabled_features),
         dynamic_tiering(dynamic_tiering) {}
-  static constexpr uintptr_t MinPages(const WasmModule* module) {
-    if (!module) return 0;
-    const uintptr_t platform_max_pages =
-        module->is_memory64 ? kV8MaxWasmMemory64Pages : kV8MaxWasmMemory32Pages;
-    return std::min(platform_max_pages, uintptr_t{module->initial_pages});
-  }
-  static constexpr uintptr_t MaxPages(const WasmModule* module) {
-    if (!module) return kV8MaxWasmMemory32Pages;
-    const uintptr_t platform_max_pages =
-        module->is_memory64 ? kV8MaxWasmMemory64Pages : kV8MaxWasmMemory32Pages;
-    if (!module->has_maximum_pages) return platform_max_pages;
-    return std::min(platform_max_pages, uintptr_t{module->maximum_pages});
-  }
 };
// The wire bytes are either owned by the StreamingDecoder, or (after streaming)
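
(The page clamping removed here from MinPages/MaxPages reappears as
UpdateMemorySizes in the module decoder, in a later hunk; the computed byte
sizes now live on WasmModule itself, so the decoder and both compilers share
one definition.)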


@@ -16,6 +16,7 @@
 #include <optional>
+#include "src/base/bounds.h"
 #include "src/base/small-vector.h"
 #include "src/base/strings.h"
 #include "src/base/v8-fallthrough.h"
@@ -4155,7 +4156,9 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
     ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
     Value index = Peek(0, 0, index_type);
     Value result = CreateValue(type.value_type());
-    CALL_INTERFACE_IF_OK_AND_REACHABLE(LoadMem, type, imm, index, &result);
+    if (V8_LIKELY(!CheckStaticallyOutOfBounds(type.size(), imm.offset))) {
+      CALL_INTERFACE_IF_OK_AND_REACHABLE(LoadMem, type, imm, index, &result);
+    }
     Drop(index);
     Push(result);
     return prefix_len + imm.length;
@@ -4172,8 +4175,10 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
     ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
     Value index = Peek(0, 0, index_type);
     Value result = CreateValue(kWasmS128);
-    CALL_INTERFACE_IF_OK_AND_REACHABLE(LoadTransform, type, transform, imm,
-                                       index, &result);
+    if (V8_LIKELY(!CheckStaticallyOutOfBounds(type.size(), imm.offset))) {
+      CALL_INTERFACE_IF_OK_AND_REACHABLE(LoadTransform, type, transform, imm,
+                                         index, &result);
+    }
     Drop(index);
     Push(result);
     return opcode_length + imm.length;
@@ -4190,8 +4195,10 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
     Value index = Peek(1, 0, kWasmI32);
     Value result = CreateValue(kWasmS128);
-    CALL_INTERFACE_IF_OK_AND_REACHABLE(LoadLane, type, v128, index, mem_imm,
-                                       lane_imm.lane, &result);
+    if (V8_LIKELY(!CheckStaticallyOutOfBounds(type.size(), mem_imm.offset))) {
+      CALL_INTERFACE_IF_OK_AND_REACHABLE(LoadLane, type, v128, index, mem_imm,
+                                         lane_imm.lane, &result);
+    }
     Drop(2);
     Push(result);
     return opcode_length + mem_imm.length + lane_imm.length;
@@ -4208,12 +4215,24 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
     Value v128 = Peek(0, 1, kWasmS128);
     Value index = Peek(1, 0, kWasmI32);
-    CALL_INTERFACE_IF_OK_AND_REACHABLE(StoreLane, type, mem_imm, index, v128,
-                                       lane_imm.lane);
+    if (V8_LIKELY(!CheckStaticallyOutOfBounds(type.size(), mem_imm.offset))) {
+      CALL_INTERFACE_IF_OK_AND_REACHABLE(StoreLane, type, mem_imm, index, v128,
+                                         lane_imm.lane);
+    }
     Drop(2);
     return opcode_length + mem_imm.length + lane_imm.length;
   }
+  bool CheckStaticallyOutOfBounds(uintptr_t size, uintptr_t offset) {
+    const bool statically_oob = !base::IsInBounds<uintptr_t>(
+        offset, size, this->module_->max_memory_size);
+    if (statically_oob) {
+      CALL_INTERFACE_IF_OK_AND_REACHABLE(Trap, TrapReason::kTrapMemOutOfBounds);
+      SetSucceedingCodeDynamicallyUnreachable();
+    }
+    return statically_oob;
+  }
   int DecodeStoreMem(StoreType store, int prefix_len = 1) {
     MemoryAccessImmediate imm =
         MakeMemoryAccessImmediate(prefix_len, store.size_log_2());
@@ -4221,7 +4240,9 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
     Value value = Peek(0, 1, store.value_type());
     ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
     Value index = Peek(1, 0, index_type);
-    CALL_INTERFACE_IF_OK_AND_REACHABLE(StoreMem, store, imm, index, value);
+    if (V8_LIKELY(!CheckStaticallyOutOfBounds(store.size(), imm.offset))) {
+      CALL_INTERFACE_IF_OK_AND_REACHABLE(StoreMem, store, imm, index, value);
+    }
     Drop(2);
     return prefix_len + imm.length;
   }
@@ -6148,14 +6169,20 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
     CHECK(!this->module_->is_memory64);
     ArgVector args = PeekArgs(sig);
     if (sig->return_count() == 0) {
-      CALL_INTERFACE_IF_OK_AND_REACHABLE(AtomicOp, opcode, base::VectorOf(args),
-                                         imm, nullptr);
+      if (V8_LIKELY(
+              !CheckStaticallyOutOfBounds(memtype.MemSize(), imm.offset))) {
+        CALL_INTERFACE_IF_OK_AND_REACHABLE(AtomicOp, opcode,
+                                           base::VectorOf(args), imm, nullptr);
+      }
       DropArgs(sig);
     } else {
       DCHECK_EQ(1, sig->return_count());
       Value result = CreateValue(sig->GetReturn());
-      CALL_INTERFACE_IF_OK_AND_REACHABLE(AtomicOp, opcode, base::VectorOf(args),
-                                         imm, &result);
+      if (V8_LIKELY(
+              !CheckStaticallyOutOfBounds(memtype.MemSize(), imm.offset))) {
+        CALL_INTERFACE_IF_OK_AND_REACHABLE(AtomicOp, opcode,
+                                           base::VectorOf(args), imm, &result);
+      }
       DropArgs(sig);
       Push(result);
     }


@@ -848,6 +848,7 @@ class ModuleDecoderTemplate : public Decoder {
         break;
       }
     }
+    UpdateMemorySizes();
    tracer_.ImportsDone();
  }
@@ -939,6 +940,20 @@ class ModuleDecoderTemplate : public Decoder {
           module_->has_maximum_pages, max_pages, &module_->maximum_pages,
           module_->is_memory64 ? k64BitLimits : k32BitLimits);
     }
+    UpdateMemorySizes();
   }
+  void UpdateMemorySizes() {
+    // Set min and max memory size.
+    const uintptr_t platform_max_pages = module_->is_memory64
+                                             ? kV8MaxWasmMemory64Pages
+                                             : kV8MaxWasmMemory32Pages;
+    module_->min_memory_size =
+        std::min(platform_max_pages, uintptr_t{module_->initial_pages}) *
+        kWasmPageSize;
+    module_->max_memory_size =
+        std::min(platform_max_pages, uintptr_t{module_->maximum_pages}) *
+        kWasmPageSize;
+  }
   void DecodeGlobalSection() {
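
For intuition, the arithmetic of UpdateMemorySizes can be sketched in a few
lines of JavaScript (the page size and the memory32 page limit below are
assumed illustration values; the real constants live in V8's wasm-limits.h):

  const kWasmPageSize = 0x10000;    // 64 KiB per wasm page
  const kMaxMemory32Pages = 65536;  // assumed platform limit (4 GiB)

  function updateMemorySizes(module) {
    // Clamp declared page counts to the platform limit, then scale to bytes.
    module.min_memory_size =
        Math.min(kMaxMemory32Pages, module.initial_pages) * kWasmPageSize;
    module.max_memory_size =
        Math.min(kMaxMemory32Pages, module.maximum_pages) * kWasmPageSize;
  }

  // The regression test declares addMemory(0, 0), so both sizes are 0 bytes
  // and every memory access in it is statically out of bounds.
  const mod = {initial_pages: 0, maximum_pages: 0};
  updateMemorySizes(mod);
  console.log(mod.min_memory_size, mod.max_memory_size);  // 0 0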


@@ -499,6 +499,8 @@ struct V8_EXPORT_PRIVATE WasmModule {
   Zone signature_zone;
   uint32_t initial_pages = 0;      // initial size of the memory in 64k pages
   uint32_t maximum_pages = 0;      // maximum size of the memory in 64k pages
+  uintptr_t min_memory_size = 0;   // smallest size of any memory in bytes
+  uintptr_t max_memory_size = 0;   // largest size of any memory in bytes
   bool has_shared_memory = false;  // true if memory is a SharedArrayBuffer
   bool has_maximum_pages = false;  // true if there is a maximum memory size
   bool is_memory64 = false;        // true if the memory is 64 bit


@@ -125,6 +125,8 @@ byte* TestingModuleBuilder::AddMemory(uint32_t size, SharedFlag shared) {
                              ? test_module_->maximum_pages
                              : initial_pages;
   test_module_->has_memory = true;
+  test_module_->min_memory_size = initial_pages * kWasmPageSize;
+  test_module_->max_memory_size = maximum_pages * kWasmPageSize;
   // Create the WasmMemoryObject.
   Handle<WasmMemoryObject> memory_object =


@@ -0,0 +1,120 @@
// Copyright 2023 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --experimental-wasm-gc --allow-natives-syntax
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
let simdSupported = (() => {
const builder = new WasmModuleBuilder();
builder.addMemory(10, 10);
builder.addFunction(null, makeSig([], [kWasmS128]))
.addBody([
kExprI32Const, 0,
kSimdPrefix, kExprS128LoadMem, 1, 0,
]);
try {
builder.instantiate();
return true;
} catch(e) {
assertContains('SIMD unsupported', '' + e)
return false;
}
})();
const builder = new WasmModuleBuilder();
builder.addMemory(0, 0); // max size = 0
const callee = builder.addFunction('callee', makeSig([], []))
.addBody([]);
const testCases = {
'StoreMem': [
kExprI32Const, 0,
kExprI32Const, 0,
kExprI32StoreMem, 1, 0x0f,
],
'LoadMem': [
kExprI32Const, 0,
kExprI32LoadMem, 1, 0x0f,
kExprDrop,
],
'atomicStore': [
kExprI32Const, 0,
kExprI64Const, 0,
kAtomicPrefix, kExprI64AtomicStore16U, 1, 0x0f,
],
'atomicLoad': [
kExprI32Const, 0,
kAtomicPrefix, kExprI64AtomicLoad16U, 1, 0x0f,
kExprDrop,
],
};
if (simdSupported) {
Object.assign(testCases, {
'SimdStoreMem': [
kExprI32Const, 0,
...WasmModuleBuilder.defaultFor(kWasmS128),
kSimdPrefix, kExprS128StoreMem, 1, 0,
],
'SimdLoadMem': [
kExprI32Const, 0,
kSimdPrefix, kExprS128LoadMem, 1, 0,
kExprDrop,
],
'SimdStoreLane': [
kExprI32Const, 0,
...WasmModuleBuilder.defaultFor(kWasmS128),
kSimdPrefix, kExprS128Store32Lane, 1, 0, 0, 0, 0,
],
'SimdLoadLane': [
kExprI32Const, 0,
...WasmModuleBuilder.defaultFor(kWasmS128),
kSimdPrefix, kExprS128Load32Lane, 1, 0, 0, 0, 0,
],
'SimdLoadTransform': [
kExprI32Const, 0x00,
kExprI32Const, 0x00,
kSimdPrefix, kExprS128Load32Splat, 1, 0, 0, 0, 0,
],
});
}
const functionIdx = {};
for (const [name, code] of Object.entries(testCases)) {
functionIdx[name] = builder.addFunction(name, makeSig([], []))
.exportFunc()
.addBody([
// Some call that allocates a feedback vector.
// This is required to hit the invariant on the call below.
kExprCallFunction, callee.index,
// Static out of bounds memory access.
...code,
// Call function.
// With speculative inlining this requires a slot in the feedback vector.
// As it is unreachable based on trapping out of bounds store, the
// allocation of this slot can be skipped. However, this should be treated
// consistently between liftoff and turbofan.
kExprCallFunction, callee.index,
]).index;
}
const instance = builder.instantiate();
function run(fct) {
try {
fct();
assertUnreachable();
} catch (e) {
assertContains('memory access out of bounds', '' + e)
}
}
for (const [name, index] of Object.entries(functionIdx)) {
print(`Test ${name}`);
// Create feedback vectors in liftoff compilation.
for (let i = 0; i < 5; ++i) run(instance.exports[name]);
// Force turbofan compilation.
%WasmTierUpFunction(instance, index);
run(instance.exports[name]);
}
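
The test runs under d8 with the flags named in its header line. Each exported
function first runs five times under Liftoff, trapping with "memory access out
of bounds" on every run, and is then tiered up via %WasmTierUpFunction and run
once more. Before this fix, the tier-up would violate the same-size assumption
described in the commit message, because Turbofan expected a feedback vector
slot for the call that Liftoff had skipped.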