Revert "[liftoff][arm64] Zero-extend offsets also for SIMD"

This reverts commit b99fe75c6d.

Reason for revert:
https://ci.chromium.org/p/v8/builders/ci/V8%20Linux/43105

Original change's description:
> [liftoff][arm64] Zero-extend offsets also for SIMD
>
> This extends https://crrev.com/c/2917612 to SIMD as well, which
> (sometimes) uses the special {GetMemOpWithImmOffsetZero} method.
> As part of this CL, that method is renamed to {GetEffectiveAddress},
> which IMO is a better name. It also just returns a register, to make
> the semantics of that function obvious in the signature.
>
> Drive-by: When sign-extending to 32 bits, only write to the W portion
>           of the register. This is a bit cleaner, and I first thought
>           that this would be the bug.
>
> R=jkummerow@chromium.org
> CC=thibaudm@chromium.org
>
> Bug: chromium:1231950, v8:12018
> Change-Id: Ifaefe1f18e3a00534a30c99e3c37ed09d9508f6e
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3049073
> Reviewed-by: Zhi An Ng <zhin@chromium.org>
> Commit-Queue: Clemens Backes <clemensb@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#75898}

Bug: chromium:1231950, v8:12018
Change-Id: I4e7a9d6fa6809b7c4d9be919cd5698737d784849
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3049085
Auto-Submit: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Cr-Commit-Position: refs/heads/master@{#75900}
Author: Michael Achenbach <machenbach@chromium.org>
Date: 2021-07-23 20:22:38 +00:00 (committed by V8 LUCI CQ)
Parent: 694b0334f1
Commit: 7b455bf2b9
3 changed files with 26 additions and 46 deletions

src/wasm/baseline/arm64/liftoff-assembler-arm64.h

@@ -138,23 +138,27 @@ inline MemOperand GetMemOp(LiftoffAssembler* assm,
              : MemOperand(effective_addr, offset.W(), UXTW);
 }
 
-// Compute the effective address (sum of |addr|, |offset| (if given) and
-// |offset_imm|) into a temporary register. This is needed for certain load
-// instructions that do not support an offset (register or immediate).
-// Returns |addr| if both |offset| and |offset_imm| are zero.
-inline Register GetEffectiveAddress(LiftoffAssembler* assm,
-                                    UseScratchRegisterScope* temps,
-                                    Register addr, Register offset,
-                                    uintptr_t offset_imm) {
-  if (!offset.is_valid() && offset_imm == 0) return addr;
+// Certain load instructions do not support offset (register or immediate).
+// This creates a MemOperand that is suitable for such instructions by adding
+// |addr|, |offset| (if needed), and |offset_imm| into a temporary.
+inline MemOperand GetMemOpWithImmOffsetZero(LiftoffAssembler* assm,
+                                            UseScratchRegisterScope* temps,
+                                            Register addr, Register offset,
+                                            uintptr_t offset_imm) {
   Register tmp = temps->AcquireX();
   if (offset.is_valid()) {
-    // TODO(clemensb): This needs adaption for memory64.
-    assm->Add(tmp, addr, Operand(offset, UXTW));
-    addr = tmp;
+    // offset has passed BoundsCheckMem in liftoff-compiler, and been unsigned
+    // extended, so it is fine to use the full width of the register.
+    assm->Add(tmp, addr, offset);
+    if (offset_imm != 0) {
+      assm->Add(tmp, tmp, offset_imm);
+    }
+  } else {
+    if (offset_imm != 0) {
+      assm->Add(tmp, addr, offset_imm);
+    }
   }
-  if (offset_imm != 0) assm->Add(tmp, addr, offset_imm);
-  return tmp;
+  return MemOperand(tmp.X(), 0);
 }
 
 enum class ShiftDirection : bool { kLeft, kRight };
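
What the hunk above changes back: {GetEffectiveAddress} added the offset register with UXTW, i.e. zero-extending its low 32 bits, while the restored {GetMemOpWithImmOffsetZero} adds the full 64-bit register and relies on the offset having been unsigned-extended after BoundsCheckMem. A standalone sketch of the difference (plain C++, not V8 code; the base address and offset values are made up):

#include <cassert>
#include <cstdint>

// |offset| models a 32-bit wasm index that was sign-extended to 64 bits,
// e.g. the -3 produced by the regression test deleted at the end of this CL.

// Removed variant: Add(tmp, addr, Operand(offset, UXTW)) reads only the low
// 32 bits of the offset register, zero-extended.
uint64_t add_uxtw(uint64_t addr, uint64_t offset) {
  return addr + static_cast<uint32_t>(offset);
}

// Restored variant: Add(tmp, addr, offset) adds the full 64-bit register, so
// it depends on the offset already being zero-extended (the restored comment
// notes it has passed BoundsCheckMem and been unsigned-extended).
uint64_t add_x(uint64_t addr, uint64_t offset) {
  return addr + offset;
}

int main() {
  uint64_t mem_base = 0x100000000ull;       // hypothetical memory start
  uint64_t offset = 0xfffffffffffffffdull;  // -3, sign-extended to 64 bits

  // UXTW turns the offset into 0xfffffffd, an out-of-bounds index above the
  // base; the full-width add wraps the address to below the memory base.
  assert(add_uxtw(mem_base, offset) == 0x1fffffffdull);
  assert(add_x(mem_base, offset) == 0xfffffffdull);
  return 0;
}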
@@ -1497,11 +1501,11 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
 }
 
 void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
-  sxtb(dst.W(), src.W());
+  sxtb(dst, src);
 }
 
 void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
-  sxth(dst.W(), src.W());
+  sxth(dst, src);
 }
 
 void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
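
The sxtb/sxth lines above undo the drive-by from the original CL: on arm64, writing the W register sign-extends to 32 bits and implicitly zeroes the upper 32 bits of the X register, while writing the X register sign-extends through all 64 bits. A minimal model of the two behaviors (plain C++, not V8 code):

#include <cassert>
#include <cstdint>

// Models "sxtb w0, w1": sign-extend the low byte to 32 bits; the upper
// 32 bits of the destination X register become zero.
uint64_t sxtb_w(uint64_t src) {
  return static_cast<uint32_t>(static_cast<int8_t>(src & 0xff));
}

// Models "sxtb x0, x1": sign-extend the low byte through all 64 bits.
uint64_t sxtb_x(uint64_t src) {
  return static_cast<uint64_t>(static_cast<int8_t>(src & 0xff));
}

int main() {
  uint64_t v = 0xfd;  // the byte -3
  assert(sxtb_w(v) == 0x00000000fffffffdull);  // upper half zeroed
  assert(sxtb_x(v) == 0xfffffffffffffffdull);  // sign bits fill the register
  return 0;
}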
@@ -1635,8 +1639,8 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
   UseScratchRegisterScope temps(this);
   MemOperand src_op =
       transform == LoadTransformationKind::kSplat
-          ? MemOperand{liftoff::GetEffectiveAddress(this, &temps, src_addr,
-                                                    offset_reg, offset_imm)}
+          ? liftoff::GetMemOpWithImmOffsetZero(this, &temps, src_addr,
+                                               offset_reg, offset_imm)
           : liftoff::GetMemOp(this, &temps, src_addr, offset_reg, offset_imm);
   *protected_load_pc = pc_offset();
   MachineType memtype = type.mem_type();
@@ -1687,8 +1691,8 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
                                 uintptr_t offset_imm, LoadType type,
                                 uint8_t laneidx, uint32_t* protected_load_pc) {
   UseScratchRegisterScope temps(this);
-  MemOperand src_op{
-      liftoff::GetEffectiveAddress(this, &temps, addr, offset_reg, offset_imm)};
+  MemOperand src_op = liftoff::GetMemOpWithImmOffsetZero(
+      this, &temps, addr, offset_reg, offset_imm);
   *protected_load_pc = pc_offset();
 
   MachineType mem_type = type.mem_type();
@@ -1714,8 +1718,8 @@ void LiftoffAssembler::StoreLane(Register dst, Register offset,
                                  StoreType type, uint8_t lane,
                                  uint32_t* protected_store_pc) {
   UseScratchRegisterScope temps(this);
-  MemOperand dst_op{
-      liftoff::GetEffectiveAddress(this, &temps, dst, offset, offset_imm)};
+  MemOperand dst_op =
+      liftoff::GetMemOpWithImmOffsetZero(this, &temps, dst, offset, offset_imm);
   if (protected_store_pc) *protected_store_pc = pc_offset();
 
   MachineRepresentation rep = type.mem_rep();

test/mjsunit/mjsunit.status

@@ -1456,12 +1456,6 @@
   'concurrent-initial-prototype-change-1': [SKIP],
 }],  # variant == concurrent_inlining
 
-##############################################################################
-['variant == instruction_scheduling or variant == stress_instruction_scheduling', {
-  # BUG(12018): This test currently fails with --turbo-instruction-scheduling.
-  'regress/wasm/regress-1231950': [SKIP],
-}],  # variant == instruction_scheduling or variant == stress_instruction_scheduling
-
 ################################################################################
 ['single_generation', {
   # These tests rely on allocation site tracking which only works in the young generation.

test/mjsunit/regress/wasm/regress-1231950.js

@@ -1,18 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-load('test/mjsunit/wasm/wasm-module-builder.js');
-
-const builder = new WasmModuleBuilder();
-builder.addMemory(1, 1);
-builder.addFunction('main', kSig_d_v)
-    .addBody([
-      ...wasmI32Const(-3),                            // i32.const
-      kExprI32SExtendI8,                              // i32.extend8_s
-      kSimdPrefix, kExprS128Load32Splat, 0x01, 0x02,  // s128.load32_splat
-      kExprUnreachable,                               // unreachable
-    ])
-    .exportFunc();
-const instance = builder.instantiate();
-assertTraps(kTrapMemOutOfBounds, instance.exports.main);
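
For reference, this deleted test drives exactly the path from the first hunk: i32.extend8_s turns -3 into -3, i.e. 0xfffffffd as an unsigned 32-bit index, and with the memarg offset of 2 the effective index is 0xfffffffd + 2, far beyond the single 64KiB page declared by addMemory(1, 1). The load must therefore trap with kTrapMemOutOfBounds rather than wrap around to a small address.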