[wasm][memory64] Fix atomics
This fixes a TODO about atomics and memory64 and removes the explicit CHECK for the previously unsupported situation. As for other memory accesses, the memory index is supposed to be a 64-bit value if memory64 is being used. The bounds checking implementation in Liftoff and TurboFan is shared with non-atomic memory accesses, so it is already prepared for memory64. We only need to fix the expected type in the function body decoder, and prepare the assemblers for 64-bit index values.

R=jkummerow@chromium.org

Bug: v8:13636, v8:10949
Change-Id: I210ac488bd2bb1cb141e16597ca62d3fb27cad3b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4191767
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Cr-Commit-Position: refs/heads/main@{#85525}
This commit is contained in:
parent b0d8319803
commit 76a817e03a
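For illustration only (not part of the commit): a minimal sketch of the kind of module this change enables, written with the mjsunit WasmModuleBuilder helpers that the test changes further down also use. The helper names (WasmModuleBuilder, addMemory64, makeSig, kAtomicPrefix, kExprI32AtomicLoad, instantiate, assertEquals) and the memory64 feature flag are assumed from test/mjsunit/wasm; the point is that with memory64 the index operand of an atomic access is an i64.

// Sketch, assuming the mjsunit wasm-module-builder helpers and a memory64-enabled
// build (the test file below relies on an --experimental-wasm-memory64 flag).
let builder = new WasmModuleBuilder();
builder.addMemory64(1, 1, true);  // one page, addressed with 64-bit indices
builder.addFunction('atomic_load', makeSig([kWasmI64], [kWasmI32]))
    .addBody([
      kExprLocalGet, 0,                         // i64 index (not i32)
      kAtomicPrefix, kExprI32AtomicLoad, 0, 0,  // alignment/offset immediates as in the test below
    ])
    .exportFunc();
let instance = builder.instantiate();
assertEquals(0, instance.exports.atomic_load(8n));  // i64 index passed as a BigInt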
@@ -4974,7 +4974,7 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
   std::tie(index, bounds_check_result) =
       CheckBoundsAndAlignment(info.machine_type.MemSize(), inputs[0], offset,
                               position, enforce_bounds_check);
-  // MemoryAccessKind::kUnalligned is impossible due to explicit aligment check.
+  // MemoryAccessKind::kUnaligned is impossible due to explicit aligment check.
   MemoryAccessKind access_kind =
       bounds_check_result == WasmGraphBuilder::kTrapHandler
           ? MemoryAccessKind::kProtected
@@ -1109,7 +1109,8 @@ inline void I64Store(LiftoffAssembler* lasm, LiftoffRegister dst,
 
 void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
                                   Register offset_reg, uint32_t offset_imm,
-                                  LoadType type, LiftoffRegList /* pinned */) {
+                                  LoadType type, LiftoffRegList /* pinned */,
+                                  bool /* i64_offset */) {
   if (type.value() != LoadType::kI64Load) {
     Load(dst, src_addr, offset_reg, offset_imm, type, nullptr, true);
     dmb(ISH);
@@ -1137,7 +1138,8 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
 
 void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
                                    uint32_t offset_imm, LiftoffRegister src,
-                                   StoreType type, LiftoffRegList pinned) {
+                                   StoreType type, LiftoffRegList pinned,
+                                   bool /* i64_offset */) {
   if (type.value() == StoreType::kI64Store) {
     liftoff::AtomicOp64(this, dst_addr, offset_reg, offset_imm, src, {},
                         liftoff::I64Store);
@@ -1152,7 +1154,8 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
 
 void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
                                  uint32_t offset_imm, LiftoffRegister value,
-                                 LiftoffRegister result, StoreType type) {
+                                 LiftoffRegister result, StoreType type,
+                                 bool /* i64_offset */) {
   if (type.value() == StoreType::kI64Store) {
     liftoff::AtomicOp64(this, dst_addr, offset_reg, offset_imm, value, {result},
                         liftoff::I64Binop<&Assembler::add, &Assembler::adc>);
@@ -1164,7 +1167,8 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
 
 void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
                                  uint32_t offset_imm, LiftoffRegister value,
-                                 LiftoffRegister result, StoreType type) {
+                                 LiftoffRegister result, StoreType type,
+                                 bool /* i64_offset */) {
   if (type.value() == StoreType::kI64Store) {
     liftoff::AtomicOp64(this, dst_addr, offset_reg, offset_imm, value, {result},
                         liftoff::I64Binop<&Assembler::sub, &Assembler::sbc>);
@@ -1176,7 +1180,8 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
 
 void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
                                  uint32_t offset_imm, LiftoffRegister value,
-                                 LiftoffRegister result, StoreType type) {
+                                 LiftoffRegister result, StoreType type,
+                                 bool /* i64_offset */) {
   if (type.value() == StoreType::kI64Store) {
     liftoff::AtomicOp64(this, dst_addr, offset_reg, offset_imm, value, {result},
                         liftoff::I64Binop<&Assembler::and_, &Assembler::and_>);
@@ -1188,7 +1193,8 @@ void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
 
 void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
                                 uint32_t offset_imm, LiftoffRegister value,
-                                LiftoffRegister result, StoreType type) {
+                                LiftoffRegister result, StoreType type,
+                                bool /* i64_offset */) {
   if (type.value() == StoreType::kI64Store) {
     liftoff::AtomicOp64(this, dst_addr, offset_reg, offset_imm, value, {result},
                         liftoff::I64Binop<&Assembler::orr, &Assembler::orr>);
@@ -1200,7 +1206,8 @@ void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
 
 void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
                                  uint32_t offset_imm, LiftoffRegister value,
-                                 LiftoffRegister result, StoreType type) {
+                                 LiftoffRegister result, StoreType type,
+                                 bool /* i64_offset */) {
   if (type.value() == StoreType::kI64Store) {
     liftoff::AtomicOp64(this, dst_addr, offset_reg, offset_imm, value, {result},
                         liftoff::I64Binop<&Assembler::eor, &Assembler::eor>);
@@ -1213,7 +1220,8 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
 void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
                                       uint32_t offset_imm,
                                       LiftoffRegister value,
-                                      LiftoffRegister result, StoreType type) {
+                                      LiftoffRegister result, StoreType type,
+                                      bool /* i64_offset */) {
   if (type.value() == StoreType::kI64Store) {
     liftoff::AtomicOp64(this, dst_addr, offset_reg, offset_imm, value, {result},
                         liftoff::I64Store);
@@ -1293,7 +1301,7 @@ inline void AtomicI64CompareExchange(LiftoffAssembler* lasm,
 void LiftoffAssembler::AtomicCompareExchange(
     Register dst_addr, Register offset_reg, uint32_t offset_imm,
     LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
-    StoreType type) {
+    StoreType type, bool /* i64_offset */) {
   if (type.value() == StoreType::kI64Store) {
     liftoff::AtomicI64CompareExchange(this, dst_addr, offset_reg, offset_imm,
                                       expected, new_value, result);
@@ -736,7 +736,8 @@ inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
 
 void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
                                   Register offset_reg, uintptr_t offset_imm,
-                                  LoadType type, LiftoffRegList /* pinned */) {
+                                  LoadType type, LiftoffRegList /* pinned */,
+                                  bool /* i64_offset */) {
   UseScratchRegisterScope temps(this);
   Register src_reg = liftoff::CalculateActualAddress(
       this, src_addr, offset_reg, offset_imm, temps.AcquireX());
@@ -763,8 +764,8 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
 
 void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
                                    uintptr_t offset_imm, LiftoffRegister src,
-                                   StoreType type,
-                                   LiftoffRegList /* pinned */) {
+                                   StoreType type, LiftoffRegList /* pinned */,
+                                   bool /* i64_offset */) {
   UseScratchRegisterScope temps(this);
   Register dst_reg = liftoff::CalculateActualAddress(
       this, dst_addr, offset_reg, offset_imm, temps.AcquireX());
@@ -791,35 +792,40 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
 
 void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
                                  uintptr_t offset_imm, LiftoffRegister value,
-                                 LiftoffRegister result, StoreType type) {
+                                 LiftoffRegister result, StoreType type,
+                                 bool /* i64_offset */) {
   liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
                        type, liftoff::Binop::kAdd);
 }
 
 void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
                                  uintptr_t offset_imm, LiftoffRegister value,
-                                 LiftoffRegister result, StoreType type) {
+                                 LiftoffRegister result, StoreType type,
+                                 bool /* i64_offset */) {
   liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
                        type, liftoff::Binop::kSub);
 }
 
 void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
                                  uintptr_t offset_imm, LiftoffRegister value,
-                                 LiftoffRegister result, StoreType type) {
+                                 LiftoffRegister result, StoreType type,
+                                 bool /* i64_offset */) {
   liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
                        type, liftoff::Binop::kAnd);
 }
 
 void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
                                 uintptr_t offset_imm, LiftoffRegister value,
-                                LiftoffRegister result, StoreType type) {
+                                LiftoffRegister result, StoreType type,
+                                bool /* i64_offset */) {
   liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
                        type, liftoff::Binop::kOr);
 }
 
 void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
                                  uintptr_t offset_imm, LiftoffRegister value,
-                                 LiftoffRegister result, StoreType type) {
+                                 LiftoffRegister result, StoreType type,
+                                 bool /* i64_offset */) {
   liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
                        type, liftoff::Binop::kXor);
 }
@@ -827,7 +833,8 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
 void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
                                       uintptr_t offset_imm,
                                       LiftoffRegister value,
-                                      LiftoffRegister result, StoreType type) {
+                                      LiftoffRegister result, StoreType type,
+                                      bool /* i64_offset */) {
   liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
                        type, liftoff::Binop::kExchange);
 }
@@ -835,7 +842,7 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
 void LiftoffAssembler::AtomicCompareExchange(
     Register dst_addr, Register offset_reg, uintptr_t offset_imm,
     LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
-    StoreType type) {
+    StoreType type, bool /* i64_offset */) {
   LiftoffRegList pinned{dst_addr, offset_reg, expected, new_value};
 
   Register result_reg = result.gp();
@@ -450,7 +450,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uint32_t offset_imm,
                             LoadType type, uint32_t* protected_load_pc,
-                            bool /* is_load_mem */, bool i64_offset,
+                            bool /* is_load_mem */, bool /* i64_offset */,
                             bool needs_shift) {
   // Offsets >=2GB are statically OOB on 32-bit systems.
   DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
@@ -598,7 +598,8 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
 
 void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
                                   Register offset_reg, uint32_t offset_imm,
-                                  LoadType type, LiftoffRegList /* pinned */) {
+                                  LoadType type, LiftoffRegList /* pinned */,
+                                  bool /* i64_offset */) {
   if (type.value() != LoadType::kI64Load) {
     Load(dst, src_addr, offset_reg, offset_imm, type, nullptr, true);
     return;
@@ -617,7 +618,8 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
 
 void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
                                    uint32_t offset_imm, LiftoffRegister src,
-                                   StoreType type, LiftoffRegList pinned) {
+                                   StoreType type, LiftoffRegList pinned,
+                                   bool /* i64_offset */) {
   DCHECK_NE(offset_reg, no_reg);
   DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
   Operand dst_op = Operand(dst_addr, offset_reg, times_1, offset_imm);
@@ -957,7 +959,8 @@ inline void AtomicBinop64(LiftoffAssembler* lasm, Binop op, Register dst_addr,
 
 void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
                                  uint32_t offset_imm, LiftoffRegister value,
-                                 LiftoffRegister result, StoreType type) {
+                                 LiftoffRegister result, StoreType type,
+                                 bool /* i64_offset */) {
   if (type.value() == StoreType::kI64Store) {
     liftoff::AtomicBinop64(this, liftoff::kAdd, dst_addr, offset_reg,
                            offset_imm, value, result);
@@ -970,7 +973,8 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
 
 void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
                                  uint32_t offset_imm, LiftoffRegister value,
-                                 LiftoffRegister result, StoreType type) {
+                                 LiftoffRegister result, StoreType type,
+                                 bool /* i64_offset */) {
   if (type.value() == StoreType::kI64Store) {
     liftoff::AtomicBinop64(this, liftoff::kSub, dst_addr, offset_reg,
                            offset_imm, value, result);
@@ -982,7 +986,8 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
 
 void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
                                  uint32_t offset_imm, LiftoffRegister value,
-                                 LiftoffRegister result, StoreType type) {
+                                 LiftoffRegister result, StoreType type,
+                                 bool /* i64_offset */) {
   if (type.value() == StoreType::kI64Store) {
     liftoff::AtomicBinop64(this, liftoff::kAnd, dst_addr, offset_reg,
                            offset_imm, value, result);
@@ -995,7 +1000,8 @@ void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
 
 void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
                                 uint32_t offset_imm, LiftoffRegister value,
-                                LiftoffRegister result, StoreType type) {
+                                LiftoffRegister result, StoreType type,
+                                bool /* i64_offset */) {
   if (type.value() == StoreType::kI64Store) {
     liftoff::AtomicBinop64(this, liftoff::kOr, dst_addr, offset_reg, offset_imm,
                            value, result);
@@ -1008,7 +1014,8 @@ void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
 
 void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
                                  uint32_t offset_imm, LiftoffRegister value,
-                                 LiftoffRegister result, StoreType type) {
+                                 LiftoffRegister result, StoreType type,
+                                 bool /* i64_offset */) {
   if (type.value() == StoreType::kI64Store) {
     liftoff::AtomicBinop64(this, liftoff::kXor, dst_addr, offset_reg,
                            offset_imm, value, result);
@@ -1022,7 +1029,8 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
 void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
                                       uint32_t offset_imm,
                                       LiftoffRegister value,
-                                      LiftoffRegister result, StoreType type) {
+                                      LiftoffRegister result, StoreType type,
+                                      bool /* i64_offset */) {
   if (type.value() == StoreType::kI64Store) {
     liftoff::AtomicBinop64(this, liftoff::kExchange, dst_addr, offset_reg,
                            offset_imm, value, result);
@@ -1036,7 +1044,7 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
 void LiftoffAssembler::AtomicCompareExchange(
     Register dst_addr, Register offset_reg, uint32_t offset_imm,
     LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
-    StoreType type) {
+    StoreType type, bool /* i64_offset */) {
   // We expect that the offset has already been added to {dst_addr}, and no
   // {offset_reg} is provided. This is to save registers.
   DCHECK_EQ(offset_reg, no_reg);
@@ -818,40 +818,47 @@ class LiftoffAssembler : public TurboAssembler {
                     bool is_store_mem = false, bool i64_offset = false);
   inline void AtomicLoad(LiftoffRegister dst, Register src_addr,
                          Register offset_reg, uintptr_t offset_imm,
-                         LoadType type, LiftoffRegList pinned);
+                         LoadType type, LiftoffRegList pinned, bool i64_offset);
   inline void AtomicStore(Register dst_addr, Register offset_reg,
                           uintptr_t offset_imm, LiftoffRegister src,
-                          StoreType type, LiftoffRegList pinned);
+                          StoreType type, LiftoffRegList pinned,
+                          bool i64_offset);
 
   inline void AtomicAdd(Register dst_addr, Register offset_reg,
                         uintptr_t offset_imm, LiftoffRegister value,
-                        LiftoffRegister result, StoreType type);
+                        LiftoffRegister result, StoreType type,
+                        bool i64_offset);
 
   inline void AtomicSub(Register dst_addr, Register offset_reg,
                         uintptr_t offset_imm, LiftoffRegister value,
-                        LiftoffRegister result, StoreType type);
+                        LiftoffRegister result, StoreType type,
+                        bool i64_offset);
 
   inline void AtomicAnd(Register dst_addr, Register offset_reg,
                         uintptr_t offset_imm, LiftoffRegister value,
-                        LiftoffRegister result, StoreType type);
+                        LiftoffRegister result, StoreType type,
+                        bool i64_offset);
 
   inline void AtomicOr(Register dst_addr, Register offset_reg,
                        uintptr_t offset_imm, LiftoffRegister value,
-                       LiftoffRegister result, StoreType type);
+                       LiftoffRegister result, StoreType type, bool i64_offset);
 
   inline void AtomicXor(Register dst_addr, Register offset_reg,
                         uintptr_t offset_imm, LiftoffRegister value,
-                        LiftoffRegister result, StoreType type);
+                        LiftoffRegister result, StoreType type,
+                        bool i64_offset);
 
   inline void AtomicExchange(Register dst_addr, Register offset_reg,
                              uintptr_t offset_imm, LiftoffRegister value,
-                             LiftoffRegister result, StoreType type);
+                             LiftoffRegister result, StoreType type,
+                             bool i64_offset);
 
   inline void AtomicCompareExchange(Register dst_addr, Register offset_reg,
                                     uintptr_t offset_imm,
                                     LiftoffRegister expected,
                                     LiftoffRegister new_value,
-                                    LiftoffRegister value, StoreType type);
+                                    LiftoffRegister value, StoreType type,
+                                    bool i64_offset);
 
   inline void AtomicFence();
 
@@ -4752,6 +4752,7 @@ class LiftoffCompiler {
                       const MemoryAccessImmediate& imm) {
     LiftoffRegList pinned;
     LiftoffRegister value = pinned.set(__ PopToRegister());
+    bool i64_offset = __ cache_state()->stack_state.back().kind() == kI64;
    LiftoffRegister full_index = __ PopToRegister(pinned);
    Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
                                    full_index, pinned, kDoForceCheck);
@@ -4763,7 +4764,7 @@
     Register addr = pinned.set(GetMemoryStart(pinned));
     LiftoffRegList outer_pinned;
     if (V8_UNLIKELY(v8_flags.trace_wasm_memory)) outer_pinned.set(index);
-    __ AtomicStore(addr, index, offset, value, type, outer_pinned);
+    __ AtomicStore(addr, index, offset, value, type, outer_pinned, i64_offset);
     if (V8_UNLIKELY(v8_flags.trace_wasm_memory)) {
       TraceMemoryOperation(true, type.mem_rep(), index, offset,
                            decoder->position());
@@ -4773,6 +4774,7 @@
   void AtomicLoadMem(FullDecoder* decoder, LoadType type,
                      const MemoryAccessImmediate& imm) {
     ValueKind kind = type.value_type().kind();
+    bool i64_offset = __ cache_state()->stack_state.back().kind() == kI64;
     LiftoffRegister full_index = __ PopToRegister();
     Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
                                     full_index, {}, kDoForceCheck);
@@ -4784,7 +4786,7 @@
     Register addr = pinned.set(GetMemoryStart(pinned));
     RegClass rc = reg_class_for(kind);
     LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
-    __ AtomicLoad(value, addr, index, offset, type, pinned);
+    __ AtomicLoad(value, addr, index, offset, type, pinned, i64_offset);
     __ PushRegister(kind, value);
 
     if (V8_UNLIKELY(v8_flags.trace_wasm_memory)) {
@@ -4797,8 +4799,8 @@
                    const MemoryAccessImmediate& imm,
                    void (LiftoffAssembler::*emit_fn)(Register, Register,
                                                      uintptr_t, LiftoffRegister,
-                                                     LiftoffRegister,
-                                                     StoreType)) {
+                                                     LiftoffRegister, StoreType,
+                                                     bool)) {
     ValueKind result_kind = type.value_type().kind();
     LiftoffRegList pinned;
     LiftoffRegister value = pinned.set(__ PopToRegister());
@@ -4818,6 +4820,7 @@
     LiftoffRegister result =
         pinned.set(__ GetUnusedRegister(value.reg_class(), pinned));
 #endif
+    bool i64_offset = __ cache_state()->stack_state.back().kind() == kI64;
     LiftoffRegister full_index = __ PopToRegister(pinned);
     Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
                                     full_index, pinned, kDoForceCheck);
@@ -4829,7 +4832,7 @@
     uintptr_t offset = imm.offset;
     Register addr = pinned.set(GetMemoryStart(pinned));
 
-    (asm_.*emit_fn)(addr, index, offset, value, result, type);
+    (asm_.*emit_fn)(addr, index, offset, value, result, type, i64_offset);
     __ PushRegister(result_kind, result);
   }
 
@@ -4859,6 +4862,7 @@
     LiftoffRegister expected = pinned.set(__ PopToRegister(pinned));
 
     // Pop the index from the stack.
+    bool i64_offset = __ cache_state()->stack_state.back().kind() == kI64;
     __ DropValues(1);
 
     LiftoffRegister result = expected;
@@ -4867,7 +4871,7 @@
     // We already added the index to addr, so we can just pass no_reg to the
     // assembler now.
     __ AtomicCompareExchange(addr, no_reg, offset, expected, new_value, result,
-                             type);
+                             type, i64_offset);
     __ PushRegister(type.value_type().kind(), result);
     return;
 #else
@@ -4875,6 +4879,7 @@
     LiftoffRegList pinned;
     LiftoffRegister new_value = pinned.set(__ PopToRegister(pinned));
     LiftoffRegister expected = pinned.set(__ PopToRegister(pinned));
+    bool i64_offset = __ cache_state()->stack_state.back().kind() == kI64;
     LiftoffRegister full_index = __ PopToRegister(pinned);
     Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
                                     full_index, pinned, kDoForceCheck);
@@ -4887,7 +4892,7 @@
         pinned.set(__ GetUnusedRegister(reg_class_for(result_kind), pinned));
 
     __ AtomicCompareExchange(addr, index, offset, expected, new_value, result,
-                             type);
+                             type, i64_offset);
     __ PushRegister(result_kind, result);
 #endif
   }
@@ -452,8 +452,9 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
 
 void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
                                   Register offset_reg, uintptr_t offset_imm,
-                                  LoadType type, LiftoffRegList /* pinned */) {
-  Load(dst, src_addr, offset_reg, offset_imm, type, nullptr, true);
+                                  LoadType type, LiftoffRegList /* pinned */,
+                                  bool i64_offset) {
+  Load(dst, src_addr, offset_reg, offset_imm, type, nullptr, true, i64_offset);
 }
 
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
@@ -549,8 +550,9 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
 
 void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
                                    uintptr_t offset_imm, LiftoffRegister src,
-                                   StoreType type,
-                                   LiftoffRegList /* pinned */) {
+                                   StoreType type, LiftoffRegList /* pinned */,
+                                   bool i64_offset) {
+  if (offset_reg != no_reg && !i64_offset) AssertZeroExtended(offset_reg);
   Operand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
   Register src_reg = src.gp();
   if (cache_state()->is_used(src)) {
@@ -580,7 +582,9 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
 
 void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
                                  uintptr_t offset_imm, LiftoffRegister value,
-                                 LiftoffRegister result, StoreType type) {
+                                 LiftoffRegister result, StoreType type,
+                                 bool i64_offset) {
+  if (offset_reg != no_reg && !i64_offset) AssertZeroExtended(offset_reg);
   DCHECK(!cache_state()->is_used(result));
   if (cache_state()->is_used(value)) {
     // We cannot overwrite {value}, but the {value} register is changed in the
@@ -622,7 +626,9 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
 
 void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
                                  uintptr_t offset_imm, LiftoffRegister value,
-                                 LiftoffRegister result, StoreType type) {
+                                 LiftoffRegister result, StoreType type,
+                                 bool i64_offset) {
+  if (offset_reg != no_reg && !i64_offset) AssertZeroExtended(offset_reg);
   LiftoffRegList dont_overwrite =
       cache_state()->used_registers | LiftoffRegList{dst_addr, offset_reg};
   DCHECK(!dont_overwrite.has(result));
@@ -679,7 +685,9 @@ inline void AtomicBinop(LiftoffAssembler* lasm,
                         void (Assembler::*opq)(Register, Register),
                         Register dst_addr, Register offset_reg,
                         uintptr_t offset_imm, LiftoffRegister value,
-                        LiftoffRegister result, StoreType type) {
+                        LiftoffRegister result, StoreType type,
+                        bool i64_offset) {
+  if (offset_reg != no_reg && !i64_offset) __ AssertZeroExtended(offset_reg);
   DCHECK(!__ cache_state()->is_used(result));
   Register value_reg = value.gp();
   // The cmpxchg instruction uses rax to store the old value of the
@@ -752,29 +760,34 @@ inline void AtomicBinop(LiftoffAssembler* lasm,
 
 void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
                                  uintptr_t offset_imm, LiftoffRegister value,
-                                 LiftoffRegister result, StoreType type) {
+                                 LiftoffRegister result, StoreType type,
+                                 bool i64_offset) {
   liftoff::AtomicBinop(this, &Assembler::andl, &Assembler::andq, dst_addr,
-                       offset_reg, offset_imm, value, result, type);
+                       offset_reg, offset_imm, value, result, type, i64_offset);
 }
 
 void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
                                 uintptr_t offset_imm, LiftoffRegister value,
-                                LiftoffRegister result, StoreType type) {
+                                LiftoffRegister result, StoreType type,
+                                bool i64_offset) {
   liftoff::AtomicBinop(this, &Assembler::orl, &Assembler::orq, dst_addr,
-                       offset_reg, offset_imm, value, result, type);
+                       offset_reg, offset_imm, value, result, type, i64_offset);
 }
 
 void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
                                  uintptr_t offset_imm, LiftoffRegister value,
-                                 LiftoffRegister result, StoreType type) {
+                                 LiftoffRegister result, StoreType type,
+                                 bool i64_offset) {
   liftoff::AtomicBinop(this, &Assembler::xorl, &Assembler::xorq, dst_addr,
-                       offset_reg, offset_imm, value, result, type);
+                       offset_reg, offset_imm, value, result, type, i64_offset);
 }
 
 void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
                                       uintptr_t offset_imm,
                                       LiftoffRegister value,
-                                      LiftoffRegister result, StoreType type) {
+                                      LiftoffRegister result, StoreType type,
+                                      bool i64_offset) {
+  if (offset_reg != no_reg && !i64_offset) AssertZeroExtended(offset_reg);
   DCHECK(!cache_state()->is_used(result));
   if (cache_state()->is_used(value)) {
     // We cannot overwrite {value}, but the {value} register is changed in the
@@ -816,7 +829,8 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
 void LiftoffAssembler::AtomicCompareExchange(
     Register dst_addr, Register offset_reg, uintptr_t offset_imm,
     LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
-    StoreType type) {
+    StoreType type, bool i64_offset) {
+  if (offset_reg != no_reg && !i64_offset) AssertZeroExtended(offset_reg);
   Register value_reg = new_value.gp();
   // The cmpxchg instruction uses rax to store the old value of the
   // compare-exchange primitive. Therefore we have to spill the register and
@@ -6222,28 +6222,30 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
         opcode_length, ElementSizeLog2Of(memtype.representation()));
     if (!this->Validate(this->pc_ + opcode_length, imm)) return false;
 
-    // TODO(10949): Fix this for memory64 (index type should be kWasmI64
-    // then).
-    CHECK(!this->module_->is_memory64);
-    ArgVector args = PeekArgs(sig);
-    if (sig->return_count() == 0) {
-      if (V8_LIKELY(
-              !CheckStaticallyOutOfBounds(memtype.MemSize(), imm.offset))) {
-        CALL_INTERFACE_IF_OK_AND_REACHABLE(AtomicOp, opcode,
-                                           base::VectorOf(args), imm, nullptr);
-      }
-      DropArgs(sig);
-    } else {
-      DCHECK_EQ(1, sig->return_count());
-      Value result = CreateValue(sig->GetReturn());
-      if (V8_LIKELY(
-              !CheckStaticallyOutOfBounds(memtype.MemSize(), imm.offset))) {
-        CALL_INTERFACE_IF_OK_AND_REACHABLE(AtomicOp, opcode,
-                                           base::VectorOf(args), imm, &result);
-      }
-      DropArgs(sig);
-      Push(result);
-    }
+    int parameter_count = static_cast<int>(sig->parameter_count());
+    DCHECK_LE(1, parameter_count);
+    DCHECK_EQ(kWasmI32, sig->GetParam(0));
+    EnsureStackArguments(parameter_count);
+    ArgVector args(stack_value(parameter_count), parameter_count);
+    ValueType mem_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
+    ValidateArgType(args, 0, mem_type);
+    for (int i = 1; i < parameter_count; i++) {
+      ValidateArgType(args, i, sig->GetParam(i));
+    }
+
+    base::Optional<Value> result;
+    if (sig->return_count()) {
+      DCHECK_EQ(1, sig->return_count());
+      result = CreateValue(sig->GetReturn());
+    }
+
+    if (V8_LIKELY(!CheckStaticallyOutOfBounds(memtype.MemSize(), imm.offset))) {
+      CALL_INTERFACE_IF_OK_AND_REACHABLE(
+          AtomicOp, opcode, base::VectorOf(args), imm,
+          result.has_value() ? &result.value() : nullptr);
+    }
+    DropArgs(sig);
+    if (result.has_value()) Push(result.value());
     return opcode_length + imm.length;
   }
 
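With the CHECK removed, the decoder now validates the index operand of an atomic opcode against the memory's index type: kWasmI64 for a memory64 module, kWasmI32 otherwise. A sketch of the user-visible consequence, again using the mjsunit builder helpers assumed above (kSig_v_v, kExprI32Const, kExprI32AtomicStore are assumed names from wasm-module-builder.js): an i32 index for an atomic access on a memory64 memory is now a validation error, while an i64 index is accepted.

// Sketch only, assuming the mjsunit helpers and a memory64-enabled build.
let builder = new WasmModuleBuilder();
builder.addMemory64(1, 1, true);
builder.addFunction('bad', kSig_v_v)
    .addBody([
      kExprI32Const, 0,                          // i32 index: wrong type for memory64
      kExprI32Const, 42,                         // value to store
      kAtomicPrefix, kExprI32AtomicStore, 0, 0,  // immediates as in the test below
    ])
    .exportFunc();
// The module fails validation, because the decoder expects an i64 index here.
assertThrows(() => builder.instantiate(), WebAssembly.CompileError);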
@@ -13,26 +13,31 @@ const GB = 1024 * 1024 * 1024;
 // The current limit is 16GB. Adapt this test if this changes.
 const max_num_pages = 16 * GB / kPageSize;
 
-function BasicMemory64Tests(num_pages) {
+function BasicMemory64Tests(num_pages, use_atomic_ops) {
   const num_bytes = num_pages * kPageSize;
-  print(`Testing ${num_bytes} bytes (${num_pages} pages)`);
+  print(`Testing ${num_bytes} bytes (${num_pages} pages) on ${
+      use_atomic_ops ? '' : 'non-'}atomic memory`);
 
   let builder = new WasmModuleBuilder();
   builder.addMemory64(num_pages, num_pages, true);
 
+  // A memory operation with alignment (0) and offset (0).
+  let op = (non_atomic, atomic) => use_atomic_ops ?
+      [kAtomicPrefix, atomic, 0, 0] :
+      [non_atomic, 0, 0];
   builder.addFunction('load', makeSig([kWasmF64], [kWasmI32]))
       .addBody([
-        kExprLocalGet, 0,       // local.get 0
-        kExprI64UConvertF64,    // i64.uconvert_sat.f64
-        kExprI32LoadMem, 0, 0,  // i32.load_mem align=1 offset=0
+        kExprLocalGet, 0,                           // local.get 0
+        kExprI64UConvertF64,                        // i64.uconvert_sat.f64
+        ...op(kExprI32LoadMem, kExprI32AtomicLoad)  // load
       ])
       .exportFunc();
   builder.addFunction('store', makeSig([kWasmF64, kWasmI32], []))
       .addBody([
-        kExprLocalGet, 0,        // local.get 0
-        kExprI64UConvertF64,     // i64.uconvert_sat.f64
-        kExprLocalGet, 1,        // local.get 1
-        kExprI32StoreMem, 0, 0,  // i32.store_mem align=1 offset=0
+        kExprLocalGet, 0,                             // local.get 0
+        kExprI64UConvertF64,                          // i64.uconvert_sat.f64
+        kExprLocalGet, 1,                             // local.get 1
+        ...op(kExprI32StoreMem, kExprI32AtomicStore)  // store
       ])
       .exportFunc();
 
@@ -56,19 +61,42 @@ function BasicMemory64Tests(num_pages) {
     assertEquals(num_bytes, array.length);
   }
 
   const GB = Math.pow(2, 30);
   assertEquals(0, load(num_bytes - 4));
-  assertThrows(() => load(num_bytes - 3));
   assertTraps(kTrapMemOutOfBounds, () => load(num_bytes));
+  assertTraps(kTrapMemOutOfBounds, () => load(num_bytes - 3));
   assertTraps(kTrapMemOutOfBounds, () => load(num_bytes - 4 + 4 * GB));
   assertTraps(kTrapMemOutOfBounds, () => store(num_bytes));
+  assertTraps(kTrapMemOutOfBounds, () => store(num_bytes - 3));
   assertTraps(kTrapMemOutOfBounds, () => store(num_bytes - 4 + 4 * GB));
+  if (use_atomic_ops) {
+    assertTraps(kTrapUnalignedAccess, () => load(num_bytes - 7));
+    assertTraps(kTrapUnalignedAccess, () => store(num_bytes - 7));
+  }
 
   store(num_bytes - 4, 0x12345678);
   assertEquals(0x12345678, load(num_bytes - 4));
 
-  let kStoreOffset = 27;
+  let kStoreOffset = use_atomic_ops ? 40 : 27;
   store(kStoreOffset, 11);
   assertEquals(11, load(kStoreOffset));
 
-  // Now check 100 random positions.
-  for (let i = 0; i < 100; ++i) {
-    let position = Math.floor(Math.random() * num_bytes);
+  // Now check some interesting positions, plus 100 random positions.
+  const positions = [
+    // Nothing at the beginning.
+    0, 1,
+    // Check positions around the store offset.
+    kStoreOffset - 1, kStoreOffset, kStoreOffset + 1,
+    // Check the end.
+    num_bytes - 5, num_bytes - 4, num_bytes - 3, num_bytes - 2, num_bytes - 1,
+    // Check positions at the end, truncated to 32 bit (might be
+    // redundant).
+    (num_bytes - 5) >>> 0, (num_bytes - 4) >>> 0, (num_bytes - 3) >>> 0,
+    (num_bytes - 2) >>> 0, (num_bytes - 1) >>> 0
+  ];
+  const random_positions =
+      Array.from({length: 100}, () => Math.floor(Math.random() * num_bytes));
+  for (let position of positions.concat(random_positions)) {
     let expected = 0;
     if (position == kStoreOffset) {
       expected = 11;
@@ -371,3 +399,15 @@ function allowOOM(fn) {
   assertEquals(kValue, instance.exports.load(kOffset2));
   assertEquals(5n, instance.exports.grow(1n));
 })();
+
+(function TestAtomics_SmallMemory() {
+  print(arguments.callee.name);
+  BasicMemory64Tests(4, true);
+})();
+
+(function TestAtomics_5GB() {
+  print(arguments.callee.name);
+  let num_pages = 5 * GB / kPageSize;
+  // This test can fail if 5GB of memory cannot be allocated.
+  allowOOM(() => BasicMemory64Tests(num_pages, true));
+})();