[wasm][liftoff] Always zero-extend 32-bit offsets

The upper 32 bits of the 64-bit offset register are not guaranteed to be
cleared, so a zero-extension is needed. We already perform the
zero-extension when explicit bounds checks are emitted, but it is also
needed when the trap handler is enabled.

R=clemensb@chromium.org
CC=jkummerow@chromium.org

Bug: v8:11809
Change-Id: I21e2535c701041d11fa06c176fa683d82db0a3f1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2917612
Commit-Queue: Thibaud Michaud <thibaudm@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74881}
Author: Thibaud Michaud <thibaudm@chromium.org>
Date: 2021-06-01 13:57:21 +02:00 (committed by V8 LUCI CQ)
Parent: def58d04ff
Commit: 2b77ca200c
7 changed files with 83 additions and 12 deletions


@@ -769,7 +769,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uint32_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
-                            uint32_t* protected_load_pc, bool is_load_mem) {
+                            uint32_t* protected_load_pc, bool is_load_mem,
+                            bool i64_offset) {
   // Offsets >=2GB are statically OOB on 32-bit systems.
   DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
   liftoff::LoadInternal(this, dst, src_addr, offset_reg,


@@ -126,9 +126,13 @@ inline CPURegister AcquireByType(UseScratchRegisterScope* temps,
 template <typename T>
 inline MemOperand GetMemOp(LiftoffAssembler* assm,
                            UseScratchRegisterScope* temps, Register addr,
-                           Register offset, T offset_imm) {
+                           Register offset, T offset_imm,
+                           bool i64_offset = false) {
   if (offset.is_valid()) {
-    if (offset_imm == 0) return MemOperand(addr.X(), offset.X());
+    if (offset_imm == 0) {
+      return i64_offset ? MemOperand(addr.X(), offset.X())
+                        : MemOperand(addr.X(), offset.W(), UXTW);
+    }
     Register tmp = temps->AcquireX();
     DCHECK_GE(kMaxUInt32, offset_imm);
     assm->Add(tmp, offset.X(), offset_imm);
@@ -493,10 +497,11 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uintptr_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
-                            uint32_t* protected_load_pc, bool is_load_mem) {
+                            uint32_t* protected_load_pc, bool is_load_mem,
+                            bool i64_offset) {
   UseScratchRegisterScope temps(this);
-  MemOperand src_op =
-      liftoff::GetMemOp(this, &temps, src_addr, offset_reg, offset_imm);
+  MemOperand src_op = liftoff::GetMemOp(this, &temps, src_addr, offset_reg,
+                                        offset_imm, i64_offset);
   if (protected_load_pc) *protected_load_pc = pc_offset();
   switch (type.value()) {
     case LoadType::kI32Load8U:
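The arm64 fix leans on AArch64 extended-register addressing: MemOperand(addr.X(), offset.W(), UXTW) emits a load of the form "ldr xT, [xBase, wOff, uxtw]", where the hardware zero-extends the 32-bit W register to 64 bits while forming the address, so the extension costs no extra instruction. A standalone sketch of what the two operand forms compute (illustration only, not V8 code):

#include <cstdint>

// i64_offset:   ldr xT, [xBase, xOff]       -> base + off, all 64 bits used
uint64_t AddrX(uint64_t base, uint64_t off) { return base + off; }

// !i64_offset:  ldr xT, [xBase, wOff, uxtw] -> base + zext32(off); the
// zero-extension is folded into the load's address generation.
uint64_t AddrWUxtw(uint64_t base, uint64_t off) {
  return base + static_cast<uint32_t>(off);
}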


@@ -391,7 +391,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uint32_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
-                            uint32_t* protected_load_pc, bool is_load_mem) {
+                            uint32_t* protected_load_pc, bool is_load_mem,
+                            bool i64_offset) {
   // Offsets >=2GB are statically OOB on 32-bit systems.
   DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
   DCHECK_EQ(type.value_type() == kWasmI64, dst.is_gp_pair());


@@ -675,7 +675,7 @@ class LiftoffAssembler : public TurboAssembler {
   inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
                    uintptr_t offset_imm, LoadType type, LiftoffRegList pinned,
                    uint32_t* protected_load_pc = nullptr,
-                   bool is_load_mem = false);
+                   bool is_load_mem = false, bool i64_offset = false);
   inline void Store(Register dst_addr, Register offset_reg,
                     uintptr_t offset_imm, LiftoffRegister src, StoreType type,
                     LiftoffRegList pinned,


@@ -2792,6 +2792,7 @@ class LiftoffCompiler {
     // Only look at the slot, do not pop it yet (will happen in PopToRegister
     // below, if this is not a statically-in-bounds index).
     auto& index_slot = __ cache_state()->stack_state.back();
+    bool i64_offset = index_val.type == kWasmI64;
     if (IndexStaticallyInBounds(index_slot, type.size(), &offset)) {
       __ cache_state()->stack_state.pop_back();
       DEBUG_CODE_COMMENT("load from memory (constant offset)");
@@ -2799,7 +2800,8 @@ class LiftoffCompiler {
       Register mem = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
       LOAD_INSTANCE_FIELD(mem, MemoryStart, kSystemPointerSize, pinned);
       LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
-      __ Load(value, mem, no_reg, offset, type, pinned, nullptr, true);
+      __ Load(value, mem, no_reg, offset, type, pinned, nullptr, true,
+              i64_offset);
       __ PushRegister(kind, value);
     } else {
       LiftoffRegister full_index = __ PopToRegister();
@@ -2818,8 +2820,8 @@ class LiftoffCompiler {
       LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
       uint32_t protected_load_pc = 0;
-      __ Load(value, mem, index, offset, type, pinned, &protected_load_pc,
-              true);
+      __ Load(value, mem, index, offset, type, pinned, &protected_load_pc, true,
+              i64_offset);
       if (env_->use_trap_handler) {
         AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
                          protected_load_pc);
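Why the flag is derived from the index type rather than always extending: under the memory64 proposal the memory index on the wasm stack is an i64 whose full 64 bits are significant, so zero-extending it would silently truncate valid indices; only a 32-bit index may (and must) be zero-extended. A standalone sketch of that decision, with hypothetical names (not the V8 headers):

// Hypothetical standalone types, for illustration only.
enum class ValueKind { kI32, kI64 };
enum class OffsetMode { kFullRegister, kZeroExtend32 };

OffsetMode ChooseOffsetMode(ValueKind index_type) {
  // Mirrors "bool i64_offset = index_val.type == kWasmI64;" above:
  // memory64 -> all 64 bits are valid; memory32 -> only the low 32 are.
  return index_type == ValueKind::kI64 ? OffsetMode::kFullRegister
                                       : OffsetMode::kZeroExtend32;
}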


@@ -397,7 +397,11 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uintptr_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
-                            uint32_t* protected_load_pc, bool is_load_mem) {
+                            uint32_t* protected_load_pc, bool is_load_mem,
+                            bool i64_offset) {
+  if (offset_reg != no_reg && !i64_offset) {
+    AssertZeroExtended(offset_reg);
+  }
   Operand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
   if (protected_load_pc) *protected_load_pc = pc_offset();
   switch (type.value()) {
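On x64 no extension is emitted here, only a debug-mode assertion that the invariant already holds: x86-64 clears the upper half of a 64-bit register on every 32-bit operation, so a 32-bit index produced by any 32-bit instruction is already zero-extended, and the assert documents that callers are expected to maintain this. A simplified sketch of what such a check amounts to (not the actual TurboAssembler code, which inspects a register rather than a plain value):

#include <cassert>
#include <cstdint>

// Debug-only invariant check, in the spirit of AssertZeroExtended.
void AssertZeroExtended(uint64_t reg_value) {
  assert((reg_value >> 32) == 0 && "32-bit value is not zero-extended");
}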


@@ -0,0 +1,58 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --enable-testing-opcode-in-wasm --nowasm-tier-up --wasm-tier-mask-for-testing=2
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+var instance = (function () {
+  var builder = new WasmModuleBuilder();
+  builder.addMemory(1, 1, false /* exported */);
+  var sig_index = builder.addType(makeSig(
+      [kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32,
+       kWasmI32],
+      [kWasmI32]));
+  var sig_three = builder.addType(makeSig(
+      [kWasmI64, kWasmI64, kWasmI64, kWasmI64, kWasmI64, kWasmI64, kWasmI64,
+       kWasmI64],
+      []));
+  var zero = builder.addFunction("zero", kSig_i_i);
+  var one = builder.addFunction("one", sig_index);
+  var two = builder.addFunction("two", kSig_v_i);
+  var three = builder.addFunction("three", sig_three).addBody([]);
+  zero.addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, 0]);
+  one.addBody([
+      kExprLocalGet, 7,
+      kExprCallFunction, zero.index]);
+  two.addBody([
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprCallFunction, three.index,
+      kExprI32Const, 0,
+      kExprI32Const, 0,
+      kExprI32Const, 0,
+      kExprI32Const, 0,
+      kExprI32Const, 0,
+      kExprI32Const, 0,
+      kExprI32Const, 0,
+      kExprI32Const, 0,
+      kExprCallFunction, one.index,
+      kExprDrop,
+  ]).exportFunc();
+  return builder.instantiate({});
+})();
+
+instance.exports.two();
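How the regression test exercises the bug (a reading of the test, hedged): the repeated i64 constant is the LEB128 encoding of 0x100000001, a value whose low 32 bits are 1 and whose upper bits are not zero. "two" first calls "three" to park that constant in all eight argument registers, then calls "one" with eight i32 zeros; "one" forwards its last parameter (local 7) to "zero", which uses it as a load index. If an i32 argument only overwrote the low half of a still-dirty register and Liftoff skipped the zero-extension, the load address would land far outside the single-page memory. The --wasm-tier-mask-for-testing=2 flag appears to compile function index 1 ("one") with a different tier than its callees, exercising the cross-tier argument-passing boundary. A standalone decode of the constant (not V8 code):

#include <cstdint>
#include <cstdio>

// Decode the bytes 0x81 0x80 0x80 0x80 0x10 from the test as LEB128.
// (The value is non-negative, so signed and unsigned decoding agree.)
int main() {
  const uint8_t bytes[] = {0x81, 0x80, 0x80, 0x80, 0x10};
  uint64_t value = 0;
  int shift = 0;
  for (uint8_t b : bytes) {
    value |= static_cast<uint64_t>(b & 0x7f) << shift;  // 7 payload bits
    shift += 7;
    if ((b & 0x80) == 0) break;  // a clear high bit terminates the number
  }
  printf("0x%llx\n", (unsigned long long)value);  // prints 0x100000001
  return 0;
}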