[wasm-gc] Liftoff support part 2
This adds support for the following instructions:
br_on_null, ref.as_non_null, br_on_cast, i31.new

Bug: v8:7748
Change-Id: I210b8979327ea0031f89748b71b51abbac10bb8b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2590041
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71773}
Parent: 4faf8b52bf
Commit: 5e18ab5019
@@ -2064,7 +2064,7 @@ void TurboAssembler::Abort(AbortReason reason) {
   // will not return here
 }
 
-void MacroAssembler::LoadMap(Register destination, Register object) {
+void TurboAssembler::LoadMap(Register destination, Register object) {
   ldr(destination, FieldMemOperand(object, HeapObject::kMapOffset));
 }
 
@@ -523,6 +523,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void JumpIfEqual(Register x, int32_t y, Label* dest);
   void JumpIfLessThan(Register x, int32_t y, Label* dest);
 
+  void LoadMap(Register destination, Register object);
+
   // Performs a truncating conversion of a floating point number as used by
   // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
   // succeeds, otherwise falls through if result is saturated. On return
@@ -653,8 +655,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   void LeaveExitFrame(bool save_doubles, Register argument_count,
                       bool argument_count_is_length = false);
 
-  void LoadMap(Register destination, Register object);
-
   // Load the global proxy from the current context.
   void LoadGlobalProxy(Register dst);
 
@@ -2669,7 +2669,7 @@ void MacroAssembler::CompareObjectType(Register object, Register map,
   CompareInstanceType(map, type_reg, type);
 }
 
-void MacroAssembler::LoadMap(Register dst, Register object) {
+void TurboAssembler::LoadMap(Register dst, Register object) {
   LoadTaggedPointerField(dst, FieldMemOperand(object, HeapObject::kMapOffset));
 }
 
@@ -931,6 +931,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   inline void JumpIfEqual(Register x, int32_t y, Label* dest);
   inline void JumpIfLessThan(Register x, int32_t y, Label* dest);
 
+  void LoadMap(Register dst, Register object);
+
   inline void Fmov(VRegister fd, VRegister fn);
   inline void Fmov(VRegister fd, Register rn);
   // Provide explicit double and float interfaces for FP immediate moves, rather
@@ -1966,8 +1968,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   void LeaveExitFrame(bool save_doubles, const Register& scratch,
                       const Register& scratch2);
 
-  void LoadMap(Register dst, Register object);
-
   // Load the global proxy from the current context.
   void LoadGlobalProxy(Register dst);
 
@@ -1497,7 +1497,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
 }
 
 I32_BINOP_I(i32_add, add)
-I32_BINOP(i32_sub, sub)
+I32_BINOP_I(i32_sub, sub)
 I32_BINOP(i32_mul, mul)
 I32_BINOP_I(i32_and, and_)
 I32_BINOP_I(i32_or, orr)
@@ -2170,6 +2170,14 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
   b(label, cond);
 }
 
+void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
+                                           Label* label, Register lhs,
+                                           int32_t imm) {
+  Condition cond = liftoff::ToCondition(liftoff_cond);
+  cmp(lhs, Operand(imm));
+  b(label, cond);
+}
+
 void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
   clz(dst, src);
   mov(dst, Operand(dst, LSR, kRegSizeInBitsLog2));
@@ -2268,6 +2276,13 @@ bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
   return false;
 }
 
+void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
+                                      SmiCheckMode mode) {
+  tst(obj, Operand(kSmiTagMask));
+  Condition condition = mode == kJumpOnSmi ? eq : ne;
+  b(condition, target);
+}
+
 void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
                                      Register offset_reg, uintptr_t offset_imm,
                                      LoadType type,
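
A note on the Smi check above: V8 tags heap object pointers with a low bit of 1 and Smis with a low bit of 0, so a single AND against kSmiTagMask decides which way to jump. A minimal standalone sketch of the predicate, assuming V8's usual kSmiTag == 0 and kSmiTagMask == 1 (illustrative, not V8's headers):

#include <cstdint>

// Heap object pointers end in 1; Smis end in 0 (kSmiTag == 0).
bool IsSmi(uintptr_t tagged_value) {
  return (tagged_value & 1) == 0;  // kSmiTagMask == 1
}
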
@@ -994,7 +994,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
 }
 
 I32_BINOP_I(i32_add, Add)
-I32_BINOP(i32_sub, Sub)
+I32_BINOP_I(i32_sub, Sub)
 I32_BINOP(i32_mul, Mul)
 I32_BINOP_I(i32_and, And)
 I32_BINOP_I(i32_or, Orr)
@@ -1472,6 +1472,14 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
   B(label, cond);
 }
 
+void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
+                                           Label* label, Register lhs,
+                                           int32_t imm) {
+  Condition cond = liftoff::ToCondition(liftoff_cond);
+  Cmp(lhs.W(), Operand(imm));
+  B(label, cond);
+}
+
 void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
   Cmp(src.W(), wzr);
   Cset(dst.W(), eq);
@@ -1529,6 +1537,13 @@ bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
   return false;
 }
 
+void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
+                                      SmiCheckMode mode) {
+  Label* smi_label = mode == kJumpOnSmi ? target : nullptr;
+  Label* not_smi_label = mode == kJumpOnNotSmi ? target : nullptr;
+  JumpIfSmi(obj, smi_label, not_smi_label);
+}
+
 void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
                                      Register offset_reg, uintptr_t offset_imm,
                                      LoadType type,
@@ -1231,6 +1231,16 @@ void LiftoffAssembler::emit_i32_sub(Register dst, Register lhs, Register rhs) {
   }
 }
 
+void LiftoffAssembler::emit_i32_subi(Register dst, Register lhs, int32_t imm) {
+  if (dst != lhs) {
+    // We'll have to implement an UB-safe version if we need this corner case.
+    DCHECK_NE(imm, kMinInt);
+    lea(dst, Operand(lhs, -imm));
+  } else {
+    sub(dst, Immediate(imm));
+  }
+}
+
 namespace liftoff {
 template <void (Assembler::*op)(Register, Register)>
 void EmitCommutativeBinOp(LiftoffAssembler* assm, Register dst, Register lhs,
@@ -2392,6 +2402,14 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
   j(cond, label);
 }
 
+void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
+                                           Label* label, Register lhs,
+                                           int imm) {
+  Condition cond = liftoff::ToCondition(liftoff_cond);
+  cmp(lhs, Immediate(imm));
+  j(cond, label);
+}
+
 namespace liftoff {
 
 // Setcc into dst register, given a scratch byte register (might be the same as
@@ -2533,6 +2551,13 @@ bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
   return false;
 }
 
+void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
+                                      SmiCheckMode mode) {
+  test_b(obj, Immediate(kSmiTagMask));
+  Condition condition = mode == kJumpOnSmi ? zero : not_zero;
+  j(condition, target);
+}
+
 namespace liftoff {
 template <void (Assembler::*avx_op)(XMMRegister, XMMRegister, XMMRegister),
           void (Assembler::*sse_op)(XMMRegister, XMMRegister)>
@@ -541,6 +541,21 @@ class LiftoffAssembler : public TurboAssembler {
                                  LiftoffRegList pinned);
   inline void StoreTaggedPointer(Register dst_addr, int32_t offset_imm,
                                  LiftoffRegister src, LiftoffRegList pinned);
+  inline void LoadFixedArrayLengthAsInt32(LiftoffRegister dst, Register array,
+                                          LiftoffRegList pinned) {
+    int offset = FixedArray::kLengthOffset - kHeapObjectTag;
+    if (SmiValuesAre32Bits()) {
+#if V8_TARGET_LITTLE_ENDIAN
+      DCHECK_EQ(kSmiShiftSize + kSmiTagSize, 4 * kBitsPerByte);
+      offset += 4;
+#endif
+      Load(dst, array, no_reg, offset, LoadType::kI32Load, pinned);
+    } else {
+      DCHECK(SmiValuesAre31Bits());
+      Load(dst, array, no_reg, offset, LoadType::kI32Load, pinned);
+      emit_i32_sari(dst.gp(), dst.gp(), kSmiTagSize);
+    }
+  }
   inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
                    uintptr_t offset_imm, LoadType type, LiftoffRegList pinned,
                    uint32_t* protected_load_pc = nullptr,
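
A note on LoadFixedArrayLengthAsInt32 above: a FixedArray stores its length as a Smi, and the helper avoids a full Smi-untag. With 32-bit Smis the tag plus shift fill the low 32 bits, so on little-endian targets a plain i32 load at offset +4 reads the payload directly; with 31-bit Smis a single arithmetic shift removes the tag bit. A standalone sketch of the two decodings, assuming V8's usual kSmiTagSize == 1:

#include <cstdint>

// 32-bit Smis: the payload occupies the upper word of the 64-bit field.
int32_t DecodeSmiLength32BitSmis(uint64_t field) {
  return static_cast<int32_t>(field >> 32);
}

// 31-bit Smis: untag with an arithmetic shift right by kSmiTagSize (== 1).
int32_t DecodeSmiLength31BitSmis(uint32_t field) {
  return static_cast<int32_t>(field) >> 1;
}
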
@@ -612,6 +627,7 @@ class LiftoffAssembler : public TurboAssembler {
   inline void emit_i32_add(Register dst, Register lhs, Register rhs);
   inline void emit_i32_addi(Register dst, Register lhs, int32_t imm);
   inline void emit_i32_sub(Register dst, Register lhs, Register rhs);
+  inline void emit_i32_subi(Register dst, Register lhs, int32_t imm);
   inline void emit_i32_mul(Register dst, Register lhs, Register rhs);
   inline void emit_i32_divs(Register dst, Register lhs, Register rhs,
                             Label* trap_div_by_zero,
@@ -812,6 +828,8 @@ class LiftoffAssembler : public TurboAssembler {
 
   inline void emit_cond_jump(LiftoffCondition, Label*, ValueType value,
                              Register lhs, Register rhs = no_reg);
+  inline void emit_i32_cond_jumpi(LiftoffCondition liftoff_cond, Label* label,
+                                  Register lhs, int imm);
   // Set {dst} to 1 if condition holds, 0 otherwise.
   inline void emit_i32_eqz(Register dst, Register src);
   inline void emit_i32_set_cond(LiftoffCondition, Register dst, Register lhs,
@@ -830,6 +848,9 @@ class LiftoffAssembler : public TurboAssembler {
                           LiftoffRegister true_value,
                           LiftoffRegister false_value, ValueType type);
 
+  enum SmiCheckMode { kJumpOnSmi, kJumpOnNotSmi };
+  inline void emit_smi_check(Register obj, Label* target, SmiCheckMode mode);
+
   inline void LoadTransform(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uintptr_t offset_imm,
                             LoadType type, LoadTransformationKind transform,
@@ -1716,7 +1716,10 @@ class LiftoffCompiler {
   }
 
   void RefAsNonNull(FullDecoder* decoder, const Value& arg, Value* result) {
-    unsupported(decoder, kRefTypes, "ref.as_non_null");
+    LiftoffRegList pinned;
+    LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
+    MaybeEmitNullCheck(decoder, obj.gp(), pinned, arg.type);
+    __ PushRegister(ValueType::Ref(arg.type.heap_type(), kNonNullable), obj);
   }
 
   void Drop(FullDecoder* decoder, const Value& value) {
@@ -2596,7 +2599,24 @@ class LiftoffCompiler {
   }
 
   void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth) {
-    unsupported(decoder, kRefTypes, "br_on_null");
+    // Before branching, materialize all constants. This avoids repeatedly
+    // materializing them for each conditional branch.
+    if (depth != decoder->control_depth() - 1) {
+      __ MaterializeMergedConstants(
+          decoder->control_at(depth)->br_merge()->arity);
+    }
+
+    Label cont_false;
+    LiftoffRegList pinned;
+    LiftoffRegister ref = pinned.set(__ PopToRegister(pinned));
+    Register null = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+    LoadNullValue(null, pinned);
+    __ emit_cond_jump(kUnequal, &cont_false, ref_object.type, ref.gp(), null);
+
+    BrOrRet(decoder, depth);
+    __ bind(&cont_false);
+    __ PushRegister(ValueType::Ref(ref_object.type.heap_type(), kNonNullable),
+                    ref);
   }
 
   template <ValueType::Kind src_type, ValueType::Kind result_type,
@@ -3935,9 +3955,20 @@ class LiftoffCompiler {
   }
 
   void I31New(FullDecoder* decoder, const Value& input, Value* result) {
-    // TODO(7748): Implement.
-    unsupported(decoder, kGC, "i31.new");
+    LiftoffRegister src = __ PopToRegister();
+    LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {src}, {});
+    if (SmiValuesAre31Bits()) {
+      STATIC_ASSERT(kSmiTag == 0);
+      __ emit_i32_shli(dst.gp(), src.gp(), kSmiTagSize);
+    } else {
+      DCHECK(SmiValuesAre32Bits());
+      // 1 bit Smi tag, 31 bits Smi shift, 1 bit i31ref high-bit truncation.
+      constexpr int kI31To32BitSmiShift = 33;
+      __ emit_i64_shli(dst, src, kI31To32BitSmiShift);
+    }
+    __ PushRegister(kWasmI31Ref, dst);
   }
 
   void I31GetS(FullDecoder* decoder, const Value& input, Value* result) {
     // TODO(7748): Implement.
     unsupported(decoder, kGC, "i31.get_s");
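
A note on I31New above: it tags a 31-bit integer as a Smi, so no heap allocation is needed. With 31-bit Smis the payload sits directly above the single tag bit; with 32-bit Smis the shift is 1 (tag) + 31 (Smi shift) + 1 (i31 high-bit truncation) = 33, placing the payload in the upper word. A standalone sketch of the two encodings, with shift constants taken from the comments above (illustrative only):

#include <cstdint>

// 31-bit Smis: kSmiTag == 0, kSmiTagSize == 1.
uint32_t EncodeI31AsSmi31(uint32_t value) { return value << 1; }

// 32-bit Smis: shifting by 33 drops the i31 value's high bit and moves the
// remaining 31 bits into the upper word of the 64-bit Smi.
uint64_t EncodeI31AsSmi32(uint32_t value) {
  return static_cast<uint64_t>(value) << 33;
}
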
@@ -4003,13 +4034,95 @@ class LiftoffCompiler {
   }
   void BrOnCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
                 Value* result_on_branch, uint32_t depth) {
-    // TODO(7748): Implement.
-    unsupported(decoder, kGC, "br_on_cast");
+    // Before branching, materialize all constants. This avoids repeatedly
+    // materializing them for each conditional branch.
+    if (depth != decoder->control_depth() - 1) {
+      __ MaterializeMergedConstants(
+          decoder->control_at(depth)->br_merge()->arity);
+    }
+
+    Label branch, cont_false;
+    LiftoffRegList pinned;
+    LiftoffRegister rtt_reg = pinned.set(__ PopToRegister(pinned));
+    LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned));
+
+    bool obj_can_be_i31 = IsSubtypeOf(kWasmI31Ref, obj.type, decoder->module_);
+    bool rtt_is_i31 = rtt.type.heap_representation() == HeapType::kI31;
+    bool i31_check_only = obj_can_be_i31 && rtt_is_i31;
+    if (i31_check_only) {
+      __ emit_smi_check(obj_reg.gp(), &cont_false,
+                        LiftoffAssembler::kJumpOnNotSmi);
+      // Emit no further code, just fall through to taking the branch.
+    } else {
+      // Reserve all temporary registers up front, so that the cache state
+      // tracking doesn't get confused by the following conditional jumps.
+      LiftoffRegister tmp1 = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+      LiftoffRegister tmp2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+      if (obj_can_be_i31) {
+        DCHECK(!rtt_is_i31);
+        __ emit_smi_check(obj_reg.gp(), &cont_false,
+                          LiftoffAssembler::kJumpOnSmi);
+      }
+      if (obj.type.is_nullable()) {
+        LoadNullValue(tmp1.gp(), pinned);
+        __ emit_cond_jump(kEqual, &cont_false, obj.type, obj_reg.gp(),
+                          tmp1.gp());
+      }
+
+      // At this point, the object is neither null nor an i31ref. Perform
+      // a regular type check. Check for exact match first.
+      __ LoadMap(tmp1.gp(), obj_reg.gp());
+      // {tmp1} now holds the object's map.
+      __ emit_cond_jump(kEqual, &branch, rtt.type, tmp1.gp(), rtt_reg.gp());
+
+      // If the object isn't guaranteed to be an array or struct, check that.
+      // Subsequent code wouldn't handle e.g. funcrefs.
+      if (!is_data_ref_type(obj.type, decoder->module_)) {
+        EmitDataRefCheck(tmp1.gp(), &cont_false, tmp2, pinned);
+      }
+
+      // Constant-time subtyping check: load exactly one candidate RTT from the
+      // supertypes list.
+      // Step 1: load the WasmTypeInfo into {tmp1}.
+      constexpr int kTypeInfoOffset = wasm::ObjectAccess::ToTagged(
+          Map::kConstructorOrBackPointerOrNativeContextOffset);
+      __ LoadTaggedPointer(tmp1.gp(), tmp1.gp(), no_reg, kTypeInfoOffset,
+                           pinned);
+      // Step 2: load the super types list into {tmp1}.
+      constexpr int kSuperTypesOffset =
+          wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset);
+      __ LoadTaggedPointer(tmp1.gp(), tmp1.gp(), no_reg, kSuperTypesOffset,
+                           pinned);
+      // Step 3: check the list's length.
+      LiftoffRegister list_length = tmp2;
+      __ LoadFixedArrayLengthAsInt32(list_length, tmp1.gp(), pinned);
+      __ emit_i32_cond_jumpi(kUnsignedLessEqual, &cont_false, list_length.gp(),
+                             rtt.type.depth());
+      // Step 4: load the candidate list slot into {tmp1}, and compare it.
+      __ LoadTaggedPointer(
+          tmp1.gp(), tmp1.gp(), no_reg,
+          wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(rtt.type.depth()),
+          pinned);
+      __ emit_cond_jump(kUnequal, &cont_false, rtt.type, tmp1.gp(),
+                        rtt_reg.gp());
+      // Fall through to taking the branch.
+    }
+
+    __ bind(&branch);
+    __ PushRegister(rtt.type.is_bottom()
+                        ? kWasmBottom
+                        : ValueType::Ref(rtt.type.heap_type(), kNonNullable),
+                    obj_reg);
+    BrOrRet(decoder, depth);
+
+    __ bind(&cont_false);
+    // Drop the branch's value, restore original value.
+    Drop(decoder, obj);
+    __ PushRegister(obj.type, obj_reg);
   }
 
   void PassThrough(FullDecoder* decoder, const Value& from, Value* to) {
-    // TODO(7748): Implement.
-    unsupported(decoder, kGC, "");
+    // Nothing to do here.
   }
 
  private:
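
A note on the "constant-time subtyping check" in BrOnCast above: each RTT's WasmTypeInfo carries a flat list of its supertypes indexed by depth, so deciding whether the object's RTT is a subtype of a target RTT of statically known depth costs one exact-match compare, one bounds check, and one load-and-compare, independent of the hierarchy's height. A minimal sketch of that lookup, modeled with plain vectors rather than V8's heap objects:

#include <cstdint>
#include <vector>

struct Rtt {
  // supertypes[i] is this type's ancestor at subtyping depth i.
  std::vector<const Rtt*> supertypes;
};

bool IsSubtypeAtDepth(const Rtt* obj_rtt, const Rtt* target, uint32_t depth) {
  if (obj_rtt == target) return true;                      // exact match fast path
  if (obj_rtt->supertypes.size() <= depth) return false;   // list too short
  return obj_rtt->supertypes[depth] == target;             // single candidate compare
}
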
@@ -4264,6 +4377,32 @@ class LiftoffCompiler {
     }
   }
 
+  void EmitDataRefCheck(Register map, Label* not_data_ref, LiftoffRegister tmp,
+                        LiftoffRegList pinned) {
+    constexpr int kInstanceTypeOffset =
+        wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset);
+    __ Load(tmp, map, no_reg, kInstanceTypeOffset, LoadType::kI32Load16U,
+            pinned);
+    // We're going to test a range of instance types with a single unsigned
+    // comparison. Statically assert that this is safe, i.e. that there are
+    // no instance types between array and struct types that might possibly
+    // occur (i.e. internal types are OK, types of Wasm objects are not).
+    // At the time of this writing:
+    // WASM_ARRAY_TYPE = 180
+    // WASM_CAPI_FUNCTION_DATA_TYPE = 181
+    // WASM_STRUCT_TYPE = 182
+    // The specific values don't matter; the relative order does.
+    static_assert(
+        WASM_STRUCT_TYPE == static_cast<InstanceType>(WASM_ARRAY_TYPE + 2),
+        "Relying on specific InstanceType values here");
+    static_assert(WASM_CAPI_FUNCTION_DATA_TYPE ==
+                      static_cast<InstanceType>(WASM_ARRAY_TYPE + 1),
+                  "Relying on specific InstanceType values here");
+    __ emit_i32_subi(tmp.gp(), tmp.gp(), WASM_ARRAY_TYPE);
+    __ emit_i32_cond_jumpi(kUnsignedGreaterThan, not_data_ref, tmp.gp(),
+                           WASM_STRUCT_TYPE - WASM_ARRAY_TYPE);
+  }
+
   static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
 
   LiftoffAssembler asm_;
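
A note on EmitDataRefCheck above: subtracting WASM_ARRAY_TYPE first lets a single unsigned comparison cover the whole [WASM_ARRAY_TYPE, WASM_STRUCT_TYPE] range, because values below the lower bound wrap around to large unsigned numbers. A standalone sketch of the same trick (the 180/182 values come from the comment above; only their relative order matters):

#include <cstdint>

constexpr uint16_t kWasmArrayType = 180;
constexpr uint16_t kWasmStructType = 182;

bool IsWasmDataInstanceType(uint16_t instance_type) {
  // Types below kWasmArrayType wrap to large unsigned values, so one
  // unsigned compare checks both range bounds at once.
  return static_cast<uint16_t>(instance_type - kWasmArrayType) <=
         kWasmStructType - kWasmArrayType;
}
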
@@ -958,6 +958,16 @@ void LiftoffAssembler::emit_i32_sub(Register dst, Register lhs, Register rhs) {
   }
 }
 
+void LiftoffAssembler::emit_i32_subi(Register dst, Register lhs, int32_t imm) {
+  if (dst != lhs) {
+    // We'll have to implement an UB-safe version if we need this corner case.
+    DCHECK_NE(imm, kMinInt);
+    leal(dst, Operand(lhs, -imm));
+  } else {
+    subl(dst, Immediate(imm));
+  }
+}
+
 namespace liftoff {
 template <void (Assembler::*op)(Register, Register),
           void (Assembler::*mov)(Register, Register)>
@@ -2063,6 +2073,14 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
   j(cond, label);
 }
 
+void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
+                                           Label* label, Register lhs,
+                                           int imm) {
+  Condition cond = liftoff::ToCondition(liftoff_cond);
+  cmpl(lhs, Immediate(imm));
+  j(cond, label);
+}
+
 void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
   testl(src, src);
   setcc(equal, dst);
@@ -2161,6 +2179,13 @@ bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
   return true;
 }
 
+void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
+                                      SmiCheckMode mode) {
+  testb(obj, Immediate(kSmiTagMask));
+  Condition condition = mode == kJumpOnSmi ? zero : not_zero;
+  j(condition, target);
+}
+
 // TODO(fanchenk): Distinguish mov* if data bypass delay matter.
 namespace liftoff {
 template <void (Assembler::*avx_op)(XMMRegister, XMMRegister, XMMRegister),
@@ -846,16 +846,8 @@ class WasmGraphBuildingInterface {
                                          const WasmModule* module) {
     StaticKnowledge result;
     result.object_can_be_null = object_type.is_nullable();
-    result.object_must_be_data_ref = false;
     DCHECK(object_type.is_object_reference_type());  // Checked by validation.
-    if (object_type.has_index()) {
-      uint32_t reftype = object_type.ref_index();
-      // TODO(7748): When we implement dataref (=any struct or array), add it
-      // to this list.
-      if (module->has_struct(reftype) || module->has_array(reftype)) {
-        result.object_must_be_data_ref = true;
-      }
-    }
+    result.object_must_be_data_ref = is_data_ref_type(object_type, module);
     result.object_can_be_i31 = IsSubtypeOf(kWasmI31Ref, object_type, module);
    result.rtt_is_i31 = rtt_type.heap_representation() == HeapType::kI31;
     result.rtt_depth = rtt_type.depth();
@@ -495,6 +495,14 @@ inline int declared_function_index(const WasmModule* module, int func_index) {
   return declared_idx;
 }
 
+inline bool is_data_ref_type(ValueType type, const WasmModule* module) {
+  // TODO(7748): When we implement dataref (=any struct or array), support
+  // that here.
+  if (!type.has_index()) return false;
+  uint32_t index = type.ref_index();
+  return module->has_struct(index) || module->has_array(index);
+}
+
 // TruncatedUserString makes it easy to output names up to a certain length, and
 // output a truncation followed by '...' if they exceed a limit.
 // Use like this:
@@ -273,8 +273,9 @@ WASM_COMPILED_EXEC_TEST(WasmBasicStruct) {
 
 // Test struct.set, ref.as_non_null,
 // struct refs types in globals and if-results.
-TEST(WasmRefAsNonNull) {
-  WasmGCTester tester;
+WASM_COMPILED_EXEC_TEST(WasmRefAsNonNull) {
+  WasmGCTester tester(execution_tier);
+  FLAG_experimental_liftoff_extern_ref = true;
   const byte type_index =
       tester.DefineStruct({F(kWasmI32, true), F(kWasmI32, true)});
   ValueType kRefTypes[] = {ref(type_index)};
@@ -303,8 +304,9 @@ TEST(WasmRefAsNonNull) {
   tester.CheckResult(kFunc, 55);
 }
 
-TEST(WasmBrOnNull) {
-  WasmGCTester tester;
+WASM_COMPILED_EXEC_TEST(WasmBrOnNull) {
+  WasmGCTester tester(execution_tier);
+  FLAG_experimental_liftoff_extern_ref = true;
   const byte type_index =
       tester.DefineStruct({F(kWasmI32, true), F(kWasmI32, true)});
   ValueType kRefTypes[] = {ref(type_index)};
@@ -340,8 +342,9 @@ TEST(WasmBrOnNull) {
   tester.CheckResult(kNotTaken, 52);
 }
 
-TEST(BrOnCast) {
-  WasmGCTester tester;
+WASM_COMPILED_EXEC_TEST(BrOnCast) {
+  WasmGCTester tester(execution_tier);
+  FLAG_experimental_liftoff_extern_ref = true;
   const byte type_index = tester.DefineStruct({F(kWasmI32, true)});
   const byte rtt_index =
       tester.AddGlobal(ValueType::Rtt(type_index, 1), false,