[wasm] Unify builtins for AtomicWait for 32-bit and 64-bit

The timeout parameter of WebAssembly's Atomic.Wait is of type I64. There
existed two sets of builtins to pass this I64 parameter from generated
code to a runtime function: one set for 64-bit platforms where the
parameter was passed directly, and one set for 32-bit platforms where
the parameter was passed as two I32 values.

With this CL we first convert the timeout parameter to a BigInt in the
generated code and then pass the BigInt to a unified builtin. Thereby
the builtin can be written completely in Torque instead of CSA.

For I64AtomicWait, the expected-value parameter is also of type I64, so
the same handling is added for the expected-value parameter.

R=clemensb@chromium.org
CC=manoskouk@chromium.org

Bug: v8:13427
Change-Id: Ia2bb77081cf0db3615d965dbe0e5b97b806a8d1b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3990690
Commit-Queue: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Maya Lekova <mslekova@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83997}
This commit is contained in:
Andreas Haas 2022-10-31 14:33:04 +01:00 committed by V8 LUCI CQ
parent b571605b00
commit 055d2d877e
9 changed files with 114 additions and 204 deletions

View File

@ -988,8 +988,6 @@ namespace internal {
IF_WASM(ASM, WasmOnStackReplace, WasmDummy) \
IF_WASM(TFC, WasmFloat32ToNumber, WasmFloat32ToNumber) \
IF_WASM(TFC, WasmFloat64ToNumber, WasmFloat64ToNumber) \
IF_WASM(TFC, WasmI32AtomicWait32, WasmI32AtomicWait32) \
IF_WASM(TFC, WasmI64AtomicWait32, WasmI64AtomicWait32) \
IF_WASM(TFC, JSToWasmLazyDeoptContinuation, SingleParameterOnStack) \
\
/* WeakMap */ \

View File

@ -52,60 +52,6 @@ TF_BUILTIN(WasmFloat64ToNumber, WasmBuiltinsAssembler) {
Return(ChangeFloat64ToTagged(val));
}
TF_BUILTIN(WasmI32AtomicWait32, WasmBuiltinsAssembler) {
if (!Is32()) {
Unreachable();
return;
}
auto address = UncheckedParameter<Uint32T>(Descriptor::kAddress);
TNode<Number> address_number = ChangeUint32ToTagged(address);
auto expected_value = UncheckedParameter<Int32T>(Descriptor::kExpectedValue);
TNode<Number> expected_value_number = ChangeInt32ToTagged(expected_value);
auto timeout_low = UncheckedParameter<IntPtrT>(Descriptor::kTimeoutLow);
auto timeout_high = UncheckedParameter<IntPtrT>(Descriptor::kTimeoutHigh);
TNode<BigInt> timeout = BigIntFromInt32Pair(timeout_low, timeout_high);
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
TNode<Context> context = LoadContextFromInstance(instance);
TNode<Smi> result_smi =
CAST(CallRuntime(Runtime::kWasmI32AtomicWait, context, instance,
address_number, expected_value_number, timeout));
Return(Unsigned(SmiToInt32(result_smi)));
}
TF_BUILTIN(WasmI64AtomicWait32, WasmBuiltinsAssembler) {
if (!Is32()) {
Unreachable();
return;
}
auto address = UncheckedParameter<Uint32T>(Descriptor::kAddress);
TNode<Number> address_number = ChangeUint32ToTagged(address);
auto expected_value_low =
UncheckedParameter<IntPtrT>(Descriptor::kExpectedValueLow);
auto expected_value_high =
UncheckedParameter<IntPtrT>(Descriptor::kExpectedValueHigh);
TNode<BigInt> expected_value =
BigIntFromInt32Pair(expected_value_low, expected_value_high);
auto timeout_low = UncheckedParameter<IntPtrT>(Descriptor::kTimeoutLow);
auto timeout_high = UncheckedParameter<IntPtrT>(Descriptor::kTimeoutHigh);
TNode<BigInt> timeout = BigIntFromInt32Pair(timeout_low, timeout_high);
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
TNode<Context> context = LoadContextFromInstance(instance);
TNode<Smi> result_smi =
CAST(CallRuntime(Runtime::kWasmI64AtomicWait, context, instance,
address_number, expected_value, timeout));
Return(Unsigned(SmiToInt32(result_smi)));
}
TF_BUILTIN(JSToWasmLazyDeoptContinuation, WasmBuiltinsAssembler) {
// Reset thread_in_wasm_flag.
TNode<ExternalReference> thread_in_wasm_flag_address_address =

View File

@ -484,30 +484,22 @@ builtin WasmAtomicNotify(offset: uintptr, count: uint32): uint32 {
return Unsigned(SmiToInt32(result));
}
builtin WasmI32AtomicWait64(
offset: uintptr, expectedValue: int32, timeout: intptr): uint32 {
if constexpr (Is64()) {
const instance: WasmInstanceObject = LoadInstanceFromFrame();
const result: Smi = runtime::WasmI32AtomicWait(
LoadContextFromInstance(instance), instance, UintPtr53ToNumber(offset),
WasmInt32ToNumber(expectedValue), I64ToBigInt(timeout));
return Unsigned(SmiToInt32(result));
} else {
unreachable;
}
builtin WasmI32AtomicWait(
offset: uintptr, expectedValue: int32, timeout: BigInt): uint32 {
const instance: WasmInstanceObject = LoadInstanceFromFrame();
const result: Smi = runtime::WasmI32AtomicWait(
LoadContextFromInstance(instance), instance, UintPtr53ToNumber(offset),
WasmInt32ToNumber(expectedValue), timeout);
return Unsigned(SmiToInt32(result));
}
builtin WasmI64AtomicWait64(
offset: uintptr, expectedValue: intptr, timeout: intptr): uint32 {
if constexpr (Is64()) {
const instance: WasmInstanceObject = LoadInstanceFromFrame();
const result: Smi = runtime::WasmI64AtomicWait(
LoadContextFromInstance(instance), instance, UintPtr53ToNumber(offset),
I64ToBigInt(expectedValue), I64ToBigInt(timeout));
return Unsigned(SmiToInt32(result));
} else {
unreachable;
}
builtin WasmI64AtomicWait(
offset: uintptr, expectedValue: BigInt, timeout: BigInt): uint32 {
const instance: WasmInstanceObject = LoadInstanceFromFrame();
const result: Smi = runtime::WasmI64AtomicWait(
LoadContextFromInstance(instance), instance, UintPtr53ToNumber(offset),
expectedValue, timeout);
return Unsigned(SmiToInt32(result));
}
// Type feedback collection support for `call_ref`.

View File

@ -130,8 +130,6 @@ namespace internal {
V(Void) \
V(WasmFloat32ToNumber) \
V(WasmFloat64ToNumber) \
V(WasmI32AtomicWait32) \
V(WasmI64AtomicWait32) \
V(WasmSuspend) \
V(WriteBarrier) \
IF_TSAN(V, TSANLoad) \
@ -1983,38 +1981,6 @@ class V8_EXPORT_PRIVATE BigIntToI32PairDescriptor final
DECLARE_DESCRIPTOR(BigIntToI32PairDescriptor)
};
class WasmI32AtomicWait32Descriptor final
: public StaticCallInterfaceDescriptor<WasmI32AtomicWait32Descriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeoutLow,
kTimeoutHigh)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
MachineType::Uint32(), // kAddress
MachineType::Int32(), // kExpectedValue
MachineType::Uint32(), // kTimeoutLow
MachineType::Uint32()) // kTimeoutHigh
DECLARE_DESCRIPTOR(WasmI32AtomicWait32Descriptor)
};
class WasmI64AtomicWait32Descriptor final
: public StaticCallInterfaceDescriptor<WasmI64AtomicWait32Descriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValueLow, kExpectedValueHigh,
kTimeoutLow, kTimeoutHigh)
static constexpr bool kNoStackScan = true;
DEFINE_RESULT_AND_PARAMETER_TYPES(
MachineType::Uint32(), // result 1
MachineType::Uint32(), // kAddress
MachineType::Uint32(), // kExpectedValueLow
MachineType::Uint32(), // kExpectedValueHigh
MachineType::Uint32(), // kTimeoutLow
MachineType::Uint32()) // kTimeoutHigh
DECLARE_DESCRIPTOR(WasmI64AtomicWait32Descriptor)
};
class CloneObjectWithVectorDescriptor final
: public StaticCallInterfaceDescriptor<CloneObjectWithVectorDescriptor> {
public:

View File

@ -625,9 +625,8 @@ ZoneUnorderedSet<Node*>* LoopFinder::FindSmallInnermostLoopFromHeader(
WasmCode::kWasmTableGetFuncRef, WasmCode::kWasmTableSetFuncRef,
WasmCode::kWasmTableGrow,
// Atomics.
WasmCode::kWasmAtomicNotify, WasmCode::kWasmI32AtomicWait32,
WasmCode::kWasmI32AtomicWait64, WasmCode::kWasmI64AtomicWait32,
WasmCode::kWasmI64AtomicWait64,
WasmCode::kWasmAtomicNotify, WasmCode::kWasmI32AtomicWait,
WasmCode::kWasmI64AtomicWait,
// Exceptions.
WasmCode::kWasmAllocateFixedArray, WasmCode::kWasmThrow,
WasmCode::kWasmRethrow, WasmCode::kWasmRethrowExplicitContext,

View File

@ -3958,34 +3958,6 @@ void WasmGraphBuilder::AddInt64LoweringReplacement(
lowering_special_case_->replacements.insert({original, replacement});
}
CallDescriptor* WasmGraphBuilder::GetI32AtomicWaitCallDescriptor() {
if (i32_atomic_wait_descriptor_) return i32_atomic_wait_descriptor_;
i32_atomic_wait_descriptor_ = GetBuiltinCallDescriptor(
Builtin::kWasmI32AtomicWait64, zone_, StubCallMode::kCallWasmRuntimeStub);
AddInt64LoweringReplacement(
i32_atomic_wait_descriptor_,
GetBuiltinCallDescriptor(Builtin::kWasmI32AtomicWait32, zone_,
StubCallMode::kCallWasmRuntimeStub));
return i32_atomic_wait_descriptor_;
}
CallDescriptor* WasmGraphBuilder::GetI64AtomicWaitCallDescriptor() {
if (i64_atomic_wait_descriptor_) return i64_atomic_wait_descriptor_;
i64_atomic_wait_descriptor_ = GetBuiltinCallDescriptor(
Builtin::kWasmI64AtomicWait64, zone_, StubCallMode::kCallWasmRuntimeStub);
AddInt64LoweringReplacement(
i64_atomic_wait_descriptor_,
GetBuiltinCallDescriptor(Builtin::kWasmI64AtomicWait32, zone_,
StubCallMode::kCallWasmRuntimeStub));
return i64_atomic_wait_descriptor_;
}
void WasmGraphBuilder::LowerInt64(Signature<MachineRepresentation>* sig) {
if (mcgraph()->machine()->Is64()) return;
Int64Lowering r(mcgraph()->graph(), mcgraph()->machine(), mcgraph()->common(),
@ -3999,6 +3971,45 @@ void WasmGraphBuilder::LowerInt64(CallOrigin origin) {
LowerInt64(CreateMachineSignature(mcgraph()->zone(), sig_, origin));
}
CallDescriptor* WasmGraphBuilder::GetI64ToBigIntCallDescriptor(
StubCallMode stub_mode) {
CallDescriptor** i64_to_bigint_descriptor =
stub_mode == StubCallMode::kCallCodeObject
? &i64_to_bigint_stub_descriptor_
: &i64_to_bigint_builtin_descriptor_;
if (*i64_to_bigint_descriptor) return *i64_to_bigint_descriptor;
*i64_to_bigint_descriptor =
GetBuiltinCallDescriptor(Builtin::kI64ToBigInt, zone_, stub_mode);
AddInt64LoweringReplacement(
*i64_to_bigint_descriptor,
GetBuiltinCallDescriptor(Builtin::kI32PairToBigInt, zone_, stub_mode));
return *i64_to_bigint_descriptor;
}
Node* WasmGraphBuilder::BuildChangeInt64ToBigInt(Node* input,
StubCallMode stub_mode) {
Node* target;
if (mcgraph()->machine()->Is64()) {
target = (stub_mode == StubCallMode::kCallWasmRuntimeStub)
? mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kI64ToBigInt, RelocInfo::WASM_STUB_CALL)
: gasm_->GetBuiltinPointerTarget(Builtin::kI64ToBigInt);
} else {
DCHECK(mcgraph()->machine()->Is32());
// On 32-bit platforms we already set the target to the
// I32PairToBigInt builtin here, so that we don't have to replace the
// target in the int64-lowering.
target =
(stub_mode == StubCallMode::kCallWasmRuntimeStub)
? mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kI32PairToBigInt, RelocInfo::WASM_STUB_CALL)
: gasm_->GetBuiltinPointerTarget(Builtin::kI32PairToBigInt);
}
return gasm_->Call(GetI64ToBigIntCallDescriptor(stub_mode), target, input);
}
void WasmGraphBuilder::SetSourcePosition(Node* node,
wasm::WasmCodePosition position) {
DCHECK_NE(position, wasm::kNoCodePosition);
@ -4965,29 +4976,31 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
inputs[1]);
case wasm::kExprI32AtomicWait: {
auto* call_descriptor = GetI32AtomicWaitCallDescriptor();
constexpr StubCallMode kStubMode = StubCallMode::kCallWasmRuntimeStub;
auto* call_descriptor = GetBuiltinCallDescriptor(
Builtin::kWasmI32AtomicWait, zone_, kStubMode);
intptr_t target = mcgraph()->machine()->Is64()
? wasm::WasmCode::kWasmI32AtomicWait64
: wasm::WasmCode::kWasmI32AtomicWait32;
intptr_t target = wasm::WasmCode::kWasmI32AtomicWait;
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
target, RelocInfo::WASM_STUB_CALL);
return gasm_->Call(call_descriptor, call_target, effective_offset,
inputs[1], inputs[2]);
inputs[1],
BuildChangeInt64ToBigInt(inputs[2], kStubMode));
}
case wasm::kExprI64AtomicWait: {
auto* call_descriptor = GetI64AtomicWaitCallDescriptor();
constexpr StubCallMode kStubMode = StubCallMode::kCallWasmRuntimeStub;
auto* call_descriptor = GetBuiltinCallDescriptor(
Builtin::kWasmI64AtomicWait, zone_, kStubMode);
intptr_t target = mcgraph()->machine()->Is64()
? wasm::WasmCode::kWasmI64AtomicWait64
: wasm::WasmCode::kWasmI64AtomicWait32;
intptr_t target = wasm::WasmCode::kWasmI64AtomicWait;
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
target, RelocInfo::WASM_STUB_CALL);
return gasm_->Call(call_descriptor, call_target, effective_offset,
inputs[1], inputs[2]);
BuildChangeInt64ToBigInt(inputs[1], kStubMode),
BuildChangeInt64ToBigInt(inputs[2], kStubMode));
}
default:
@ -6287,18 +6300,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
stub_mode_(stub_mode),
enabled_features_(features) {}
CallDescriptor* GetI64ToBigIntCallDescriptor() {
if (i64_to_bigint_descriptor_) return i64_to_bigint_descriptor_;
i64_to_bigint_descriptor_ =
GetBuiltinCallDescriptor(Builtin::kI64ToBigInt, zone_, stub_mode_);
AddInt64LoweringReplacement(
i64_to_bigint_descriptor_,
GetBuiltinCallDescriptor(Builtin::kI32PairToBigInt, zone_, stub_mode_));
return i64_to_bigint_descriptor_;
}
CallDescriptor* GetBigIntToI64CallDescriptor(bool needs_frame_state) {
if (bigint_to_i64_descriptor_) return bigint_to_i64_descriptor_;
@ -6464,7 +6465,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::kI32:
return BuildChangeInt32ToNumber(node);
case wasm::kI64:
return BuildChangeInt64ToBigInt(node);
return BuildChangeInt64ToBigInt(node, stub_mode_);
case wasm::kF32:
return BuildChangeFloat32ToNumber(node);
case wasm::kF64:
@ -6534,22 +6535,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
kLeaveFunctionsAlone = false
};
Node* BuildChangeInt64ToBigInt(Node* input) {
Node* target;
if (mcgraph()->machine()->Is64()) {
target = GetTargetForBuiltinCall(wasm::WasmCode::kI64ToBigInt,
Builtin::kI64ToBigInt);
} else {
DCHECK(mcgraph()->machine()->Is32());
// On 32-bit platforms we already set the target to the
// I32PairToBigInt builtin here, so that we don't have to replace the
// target in the int64-lowering.
target = GetTargetForBuiltinCall(wasm::WasmCode::kI32PairToBigInt,
Builtin::kI32PairToBigInt);
}
return gasm_->Call(GetI64ToBigIntCallDescriptor(), target, input);
}
Node* BuildChangeBigIntToInt64(Node* input, Node* context,
Node* frame_state) {
Node* target;
@ -7703,7 +7688,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
SetOncePointer<const Operator> tagged_to_float64_operator_;
wasm::WasmFeatures enabled_features_;
CallDescriptor* bigint_to_i64_descriptor_ = nullptr;
CallDescriptor* i64_to_bigint_descriptor_ = nullptr;
};
} // namespace

View File

@ -834,9 +834,9 @@ class WasmGraphBuilder {
void AddInt64LoweringReplacement(CallDescriptor* original,
CallDescriptor* replacement);
CallDescriptor* GetI32AtomicWaitCallDescriptor();
Node* BuildChangeInt64ToBigInt(Node* input, StubCallMode stub_mode);
CallDescriptor* GetI64AtomicWaitCallDescriptor();
CallDescriptor* GetI64ToBigIntCallDescriptor(StubCallMode stub_mode);
Node* StoreArgsInStackSlot(
std::initializer_list<std::pair<MachineRepresentation, Node*>> args);
@ -866,8 +866,8 @@ class WasmGraphBuilder {
SetOncePointer<Node> instance_node_;
std::unique_ptr<Int64LoweringSpecialCase> lowering_special_case_;
CallDescriptor* i32_atomic_wait_descriptor_ = nullptr;
CallDescriptor* i64_atomic_wait_descriptor_ = nullptr;
CallDescriptor* i64_to_bigint_builtin_descriptor_ = nullptr;
CallDescriptor* i64_to_bigint_stub_descriptor_ = nullptr;
};
enum WasmCallKind { kWasmFunction, kWasmImportWrapper, kWasmCapiFunction };

View File

@ -4898,12 +4898,41 @@ class LiftoffCompiler {
void AtomicWait(FullDecoder* decoder, ValueKind kind,
const MemoryAccessImmediate& imm) {
LiftoffRegister full_index = __ PeekToRegister(2, {});
{
// Convert the top value of the stack (the timeout) from I64 to a BigInt,
// which we can then pass to the atomic.wait builtin.
LiftoffAssembler::VarState i64_timeout =
__ cache_state()->stack_state.back();
CallRuntimeStub(
kNeedI64RegPair ? WasmCode::kI32PairToBigInt : WasmCode::kI64ToBigInt,
MakeSig::Returns(kPointerKind).Params(kI64), {i64_timeout},
decoder->position());
__ DropValues(1);
__ PushRegister(kPointerKind, LiftoffRegister(kReturnRegister0));
}
LiftoffRegList pinned;
Register expected_reg = no_reg;
if (kind == kI32) {
expected_reg = __ PeekToRegister(1, pinned).gp();
} else {
LiftoffAssembler::VarState i64_expected =
__ cache_state()->stack_state.end()[-2];
CallRuntimeStub(
kNeedI64RegPair ? WasmCode::kI32PairToBigInt : WasmCode::kI64ToBigInt,
MakeSig::Returns(kPointerKind).Params(kI64), {i64_expected},
decoder->position());
expected_reg = kReturnRegister0;
}
LiftoffRegister expected(expected_reg);
pinned.set(expected_reg);
LiftoffRegister full_index = __ PeekToRegister(2, pinned);
Register index_reg =
BoundsCheckMem(decoder, value_kind_size(kind), imm.offset, full_index,
{}, kDoForceCheck);
pinned, kDoForceCheck);
if (index_reg == no_reg) return;
LiftoffRegList pinned{index_reg};
pinned.set(index_reg);
AlignmentCheckMem(decoder, value_kind_size(kind), imm.offset, index_reg,
pinned);
@ -4920,22 +4949,20 @@ class LiftoffCompiler {
LiftoffAssembler::VarState timeout =
__ cache_state()->stack_state.end()[-1];
LiftoffAssembler::VarState expected_value =
__ cache_state()->stack_state.end()[-2];
LiftoffAssembler::VarState expected_value(kPointerKind, expected, 0);
LiftoffAssembler::VarState index = __ cache_state()->stack_state.end()[-3];
// We have to set the correct register for the index.
index.MakeRegister(LiftoffRegister(index_plus_offset));
static constexpr WasmCode::RuntimeStubId kTargets[2][2]{
// 64 bit systems (kNeedI64RegPair == false):
{WasmCode::kWasmI64AtomicWait64, WasmCode::kWasmI32AtomicWait64},
// 32 bit systems (kNeedI64RegPair == true):
{WasmCode::kWasmI64AtomicWait32, WasmCode::kWasmI32AtomicWait32}};
auto target = kTargets[kNeedI64RegPair][kind == kI32];
auto target = kind == kI32 ? WasmCode::kWasmI32AtomicWait
: WasmCode::kWasmI64AtomicWait;
CallRuntimeStub(target, MakeSig::Params(kPointerKind, kind, kI64),
{index, expected_value, timeout}, decoder->position());
CallRuntimeStub(
target,
MakeSig::Params(kPointerKind, kind == kI32 ? kI32 : kPointerKind,
kPointerKind),
{index, expected_value, timeout}, decoder->position());
// Pop parameters from the value stack.
__ DropValues(3);

View File

@ -63,10 +63,8 @@ struct WasmModule;
V(WasmTaggedToFloat64) \
V(WasmAllocateJSArray) \
V(WasmAtomicNotify) \
V(WasmI32AtomicWait32) \
V(WasmI32AtomicWait64) \
V(WasmI64AtomicWait32) \
V(WasmI64AtomicWait64) \
V(WasmI32AtomicWait) \
V(WasmI64AtomicWait) \
V(WasmGetOwnProperty) \
V(WasmRefFunc) \
V(WasmMemoryGrow) \