diff --git a/src/wasm/baseline/arm/liftoff-assembler-arm.h b/src/wasm/baseline/arm/liftoff-assembler-arm.h index 055a1c9981..d82ebb7f77 100644 --- a/src/wasm/baseline/arm/liftoff-assembler-arm.h +++ b/src/wasm/baseline/arm/liftoff-assembler-arm.h @@ -703,13 +703,11 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr, LiftoffRegister src, LiftoffRegList pinned) { STATIC_ASSERT(kTaggedSize == kInt32Size); - { - // Store the value. - UseScratchRegisterScope temps(this); - MemOperand dst_op = - liftoff::GetMemOp(this, &temps, dst_addr, offset_reg, offset_imm); - str(src.gp(), dst_op); - } + // Store the value. + UseScratchRegisterScope temps(this); + MemOperand dst_op = + liftoff::GetMemOp(this, &temps, dst_addr, offset_reg, offset_imm); + str(src.gp(), dst_op); // The write barrier. Label write_barrier; Label exit; diff --git a/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/src/wasm/baseline/ia32/liftoff-assembler-ia32.h index 63d2fad024..5d0f2e4af4 100644 --- a/src/wasm/baseline/ia32/liftoff-assembler-ia32.h +++ b/src/wasm/baseline/ia32/liftoff-assembler-ia32.h @@ -71,7 +71,6 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base, case ValueType::kI32: case ValueType::kOptRef: case ValueType::kRef: - case ValueType::kRtt: assm->mov(dst.gp(), src); break; case ValueType::kI64: diff --git a/src/wasm/baseline/liftoff-compiler.cc b/src/wasm/baseline/liftoff-compiler.cc index 6db65d729a..44f75b4ef5 100644 --- a/src/wasm/baseline/liftoff-compiler.cc +++ b/src/wasm/baseline/liftoff-compiler.cc @@ -4063,9 +4063,6 @@ class LiftoffCompiler { __ PushRegister(kWasmI32, len); } - // 1 bit Smi tag, 31 bits Smi shift, 1 bit i31ref high-bit truncation. 
- constexpr static int kI31To32BitSmiShift = 33; - void I31New(FullDecoder* decoder, const Value& input, Value* result) { LiftoffRegister src = __ PopToRegister(); LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {src}, {}); @@ -4074,33 +4071,21 @@ class LiftoffCompiler { __ emit_i32_shli(dst.gp(), src.gp(), kSmiTagSize); } else { DCHECK(SmiValuesAre32Bits()); + // 1 bit Smi tag, 31 bits Smi shift, 1 bit i31ref high-bit truncation. + constexpr int kI31To32BitSmiShift = 33; __ emit_i64_shli(dst, src, kI31To32BitSmiShift); } __ PushRegister(kWasmI31Ref, dst); } void I31GetS(FullDecoder* decoder, const Value& input, Value* result) { - LiftoffRegister src = __ PopToRegister(); - LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {src}, {}); - if (SmiValuesAre31Bits()) { - __ emit_i32_sari(dst.gp(), src.gp(), kSmiTagSize); - } else { - DCHECK(SmiValuesAre32Bits()); - __ emit_i64_sari(dst, src, kI31To32BitSmiShift); - } - __ PushRegister(kWasmI32, dst); + // TODO(7748): Implement. + unsupported(decoder, kGC, "i31.get_s"); } void I31GetU(FullDecoder* decoder, const Value& input, Value* result) { - LiftoffRegister src = __ PopToRegister(); - LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {src}, {}); - if (SmiValuesAre31Bits()) { - __ emit_i32_shri(dst.gp(), src.gp(), kSmiTagSize); - } else { - DCHECK(SmiValuesAre32Bits()); - __ emit_i64_shri(dst, src, kI31To32BitSmiShift); - } - __ PushRegister(kWasmI32, dst); + // TODO(7748): Implement. 
+ unsupported(decoder, kGC, "i31.get_u"); } void RttCanon(FullDecoder* decoder, const HeapTypeImmediate& imm, diff --git a/src/wasm/baseline/x64/liftoff-assembler-x64.h b/src/wasm/baseline/x64/liftoff-assembler-x64.h index 575ab1e939..7810c03d4b 100644 --- a/src/wasm/baseline/x64/liftoff-assembler-x64.h +++ b/src/wasm/baseline/x64/liftoff-assembler-x64.h @@ -90,7 +90,6 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src, case ValueType::kI64: case ValueType::kOptRef: case ValueType::kRef: - case ValueType::kRtt: assm->movq(dst.gp(), src); break; case ValueType::kF32: diff --git a/test/cctest/wasm/test-gc.cc b/test/cctest/wasm/test-gc.cc index 638f13327d..33530cd675 100644 --- a/test/cctest/wasm/test-gc.cc +++ b/test/cctest/wasm/test-gc.cc @@ -42,7 +42,6 @@ class WasmGCTester { flag_liftoff_only( &v8::internal::FLAG_liftoff_only, execution_tier == TestExecutionTier::kLiftoff ? true : false), - flag_tierup(&v8::internal::FLAG_wasm_tier_up, false), zone(&allocator, ZONE_NAME), builder_(&zone), isolate_(CcTest::InitIsolateOnce()), @@ -183,7 +182,6 @@ class WasmGCTester { const FlagScope flag_typedfuns; const FlagScope flag_liftoff; const FlagScope flag_liftoff_only; - const FlagScope flag_tierup; v8::internal::AccountingAllocator allocator; Zone zone; @@ -1088,9 +1086,8 @@ TEST(RefTestCastNull) { tester.CheckHasThrown(kRefCastNull, 0); } -WASM_COMPILED_EXEC_TEST(BasicI31) { - WasmGCTester tester(execution_tier); - FLAG_experimental_liftoff_extern_ref = true; +TEST(BasicI31) { + WasmGCTester tester; const byte kSigned = tester.DefineFunction( tester.sigs.i_i(), {}, {WASM_I31_GET_S(WASM_I31_NEW(WASM_GET_LOCAL(0))), kExprEnd}); @@ -1110,9 +1107,8 @@ WASM_COMPILED_EXEC_TEST(BasicI31) { tester.CheckResult(kUnsigned, 0x7FFFFFFF, 0x7FFFFFFF); } -WASM_COMPILED_EXEC_TEST(I31Casts) { - WasmGCTester tester(execution_tier); - FLAG_experimental_liftoff_extern_ref = true; +TEST(I31Casts) { + WasmGCTester tester; const byte struct_type = 
tester.DefineStruct({F(wasm::kWasmI32, true)}); const byte i31_rtt = tester.AddGlobal(ValueType::Rtt(HeapType::kI31, 1), false, @@ -1172,9 +1168,8 @@ WASM_COMPILED_EXEC_TEST(I31Casts) { // This flushed out a few bugs, so it serves as a regression test. It can also // be modified (made to run longer) to measure performance of casts. -WASM_COMPILED_EXEC_TEST(CastsBenchmark) { - WasmGCTester tester(execution_tier); - FLAG_experimental_liftoff_extern_ref = true; +TEST(CastsBenchmark) { + WasmGCTester tester; const byte SuperType = tester.DefineStruct({F(wasm::kWasmI32, true)}); const byte SubType = tester.DefineStruct({F(wasm::kWasmI32, true), F(wasm::kWasmI32, true)});