[wasm][turbofan] Immutable object operators

Design doc: bit.ly/36MfD6Y

We introduce the simplified operators LoadImmutableFromObject and
InitializeImmutableInObject. These are lowered to Loads and Stores,
just like LoadFromObject and StoreToObject.
We split CsaLoadElimination::AbstractState into two HalfStates, which
represent the mutable and immutable components of the state.
Immutable operators in the effect chain modify the immutable
half-state, and plain operators modify the mutable half-state. The
immutable half-state is preserved across arbitrary write effects and
across loop headers. Immutable initializations do not look up and kill
previous overlapping stores, since each offset can be initialized at
most once.
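
For intuition, a minimal standalone sketch of the split state follows
(hypothetical, simplified names; the real pass keys entries by graph
node, uses PersistentMap, and partitions entries into
fresh/constant/arbitrary objects):

// Simplified model of the split abstract state; not the actual V8 classes.
#include <cstdint>
#include <map>
#include <utility>

using NodeId = int;

struct HalfState {
  // (object node, constant offset) -> node holding the known field value.
  std::map<std::pair<NodeId, uint32_t>, NodeId> entries;

  void AddField(NodeId object, uint32_t offset, NodeId value) {
    entries[{object, offset}] = value;
  }
  // Returns -1 if no value is known for this field.
  NodeId Lookup(NodeId object, uint32_t offset) const {
    auto it = entries.find({object, offset});
    return it == entries.end() ? -1 : it->second;
  }
};

struct AbstractState {
  HalfState mutable_state;    // updated by LoadFromObject/StoreToObject
  HalfState immutable_state;  // updated by the *Immutable* operators
};

// A node with arbitrary write effects clears only the mutable half, so
// loads of immutable fields can still be eliminated across it.
AbstractState AfterUnknownSideEffect(const AbstractState& in) {
  AbstractState out;
  out.immutable_state = in.immutable_state;
  return out;
}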

Bug: v8:11510

Change-Id: I0f5feca3354fdd3bdc1f511cc5214ec51e1407ad
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3268728
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Commit-Queue: Manos Koukoutos <manoskouk@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78325}
Authored by Manos Koukoutos on 2021-12-09 13:33:23 +00:00, committed by V8 LUCI CQ
parent 002e39e97a
commit 4113cf6420
14 changed files with 424 additions and 135 deletions


@ -32,7 +32,8 @@ Reduction CsaLoadElimination::Reduce(Node* node) {
if (AbstractState const* const state = node_states_.Get(effect)) {
PrintF(" state[%i]: #%d:%s\n", i, effect->id(),
effect->op()->mnemonic());
state->Print();
state->mutable_state.Print();
state->immutable_state.Print();
} else {
PrintF(" no state[%i]: #%d:%s\n", i, effect->id(),
effect->op()->mnemonic());
@ -42,8 +43,10 @@ Reduction CsaLoadElimination::Reduce(Node* node) {
}
switch (node->opcode()) {
case IrOpcode::kLoadFromObject:
case IrOpcode::kLoadImmutableFromObject:
return ReduceLoadFromObject(node, ObjectAccessOf(node->op()));
case IrOpcode::kStoreToObject:
case IrOpcode::kInitializeImmutableInObject:
return ReduceStoreToObject(node, ObjectAccessOf(node->op()));
case IrOpcode::kDebugBreak:
case IrOpcode::kAbortCSADcheck:
@ -92,7 +95,7 @@ namespace Helpers = CsaLoadEliminationHelpers;
// static
template <typename OuterKey>
void CsaLoadElimination::AbstractState::IntersectWith(
void CsaLoadElimination::HalfState::IntersectWith(
OuterMap<OuterKey>& to, const OuterMap<OuterKey>& from) {
FieldInfo empty_info;
for (const std::pair<OuterKey, InnerMap>& to_map : to) {
@ -108,8 +111,7 @@ void CsaLoadElimination::AbstractState::IntersectWith(
}
}
void CsaLoadElimination::AbstractState::IntersectWith(
AbstractState const* that) {
void CsaLoadElimination::HalfState::IntersectWith(HalfState const* that) {
IntersectWith(fresh_entries_, that->fresh_entries_);
IntersectWith(constant_entries_, that->constant_entries_);
IntersectWith(arbitrary_entries_, that->arbitrary_entries_);
@ -118,10 +120,9 @@ void CsaLoadElimination::AbstractState::IntersectWith(
IntersectWith(arbitrary_unknown_entries_, that->arbitrary_unknown_entries_);
}
CsaLoadElimination::AbstractState const*
CsaLoadElimination::AbstractState::KillField(Node* object, Node* offset,
MachineRepresentation repr) const {
AbstractState* result = zone_->New<AbstractState>(*this);
CsaLoadElimination::HalfState const* CsaLoadElimination::HalfState::KillField(
Node* object, Node* offset, MachineRepresentation repr) const {
HalfState* result = zone_->New<HalfState>(*this);
UnknownOffsetInfos empty_unknown(zone_, InnerMap(zone_));
IntPtrMatcher m(offset);
if (m.HasResolvedValue()) {
@ -179,18 +180,16 @@ CsaLoadElimination::AbstractState::KillField(Node* object, Node* offset,
result->arbitrary_unknown_entries_ = empty_unknown;
} else {
// May alias with anything. Clear the state.
return zone_->New<AbstractState>(zone_);
return zone_->New<HalfState>(zone_);
}
}
return result;
}
CsaLoadElimination::AbstractState const*
CsaLoadElimination::AbstractState::AddField(Node* object, Node* offset,
Node* value,
MachineRepresentation repr) const {
AbstractState* new_state = zone_->New<AbstractState>(*this);
CsaLoadElimination::HalfState const* CsaLoadElimination::HalfState::AddField(
Node* object, Node* offset, Node* value, MachineRepresentation repr) const {
HalfState* new_state = zone_->New<HalfState>(*this);
IntPtrMatcher m(offset);
if (m.HasResolvedValue()) {
uint32_t offset_num = static_cast<uint32_t>(m.ResolvedValue());
@ -212,7 +211,7 @@ CsaLoadElimination::AbstractState::AddField(Node* object, Node* offset,
return new_state;
}
CsaLoadElimination::FieldInfo CsaLoadElimination::AbstractState::Lookup(
CsaLoadElimination::FieldInfo CsaLoadElimination::HalfState::Lookup(
Node* object, Node* offset) const {
IntPtrMatcher m(offset);
if (m.HasResolvedValue()) {
@ -236,10 +235,10 @@ CsaLoadElimination::FieldInfo CsaLoadElimination::AbstractState::Lookup(
// static
// Kill all elements in {infos} that overlap with an element with {offset} and
// size {ElementSizeInBytes(repr)}.
void CsaLoadElimination::AbstractState::KillOffset(ConstantOffsetInfos& infos,
uint32_t offset,
MachineRepresentation repr,
Zone* zone) {
void CsaLoadElimination::HalfState::KillOffset(ConstantOffsetInfos& infos,
uint32_t offset,
MachineRepresentation repr,
Zone* zone) {
// All elements in the range [{offset}, {offset + ElementSizeInBytes(repr)})
// are in the killed range. We do not need to traverse the inner maps, we can
// just clear them.
@ -270,7 +269,7 @@ void CsaLoadElimination::AbstractState::KillOffset(ConstantOffsetInfos& infos,
}
}
void CsaLoadElimination::AbstractState::KillOffsetInFresh(
void CsaLoadElimination::HalfState::KillOffsetInFresh(
Node* const object, uint32_t offset, MachineRepresentation repr) {
for (int i = 0; i < ElementSizeInBytes(repr); i++) {
Update(fresh_entries_, offset + i, object, {});
@ -289,15 +288,15 @@ void CsaLoadElimination::AbstractState::KillOffsetInFresh(
}
// static
void CsaLoadElimination::AbstractState::Print(
const CsaLoadElimination::AbstractState::ConstantOffsetInfos& infos) {
void CsaLoadElimination::HalfState::Print(
const CsaLoadElimination::HalfState::ConstantOffsetInfos& infos) {
for (const auto outer_entry : infos) {
for (const auto inner_entry : outer_entry.second) {
Node* object = inner_entry.first;
uint32_t offset = outer_entry.first;
FieldInfo info = inner_entry.second;
PrintF(" #%d+#%d:%s -> #%d:%s [repr=%s]\n", object->id(), offset,
object->op()->mnemonic(), info.value->id(),
PrintF(" #%d:%s+(%d) -> #%d:%s [repr=%s]\n", object->id(),
object->op()->mnemonic(), offset, info.value->id(),
info.value->op()->mnemonic(),
MachineReprToString(info.representation));
}
@ -305,22 +304,22 @@ void CsaLoadElimination::AbstractState::Print(
}
// static
void CsaLoadElimination::AbstractState::Print(
const CsaLoadElimination::AbstractState::UnknownOffsetInfos& infos) {
void CsaLoadElimination::HalfState::Print(
const CsaLoadElimination::HalfState::UnknownOffsetInfos& infos) {
for (const auto outer_entry : infos) {
for (const auto inner_entry : outer_entry.second) {
Node* object = outer_entry.first;
Node* offset = inner_entry.first;
FieldInfo info = inner_entry.second;
PrintF(" #%d+#%d:%s -> #%d:%s [repr=%s]\n", object->id(), offset->id(),
object->op()->mnemonic(), info.value->id(),
info.value->op()->mnemonic(),
PrintF(" #%d:%s+#%d:%s -> #%d:%s [repr=%s]\n", object->id(),
object->op()->mnemonic(), offset->id(), offset->op()->mnemonic(),
info.value->id(), info.value->op()->mnemonic(),
MachineReprToString(info.representation));
}
}
}
void CsaLoadElimination::AbstractState::Print() const {
void CsaLoadElimination::HalfState::Print() const {
Print(fresh_entries_);
Print(constant_entries_);
Print(arbitrary_entries_);
@ -331,14 +330,23 @@ void CsaLoadElimination::AbstractState::Print() const {
Reduction CsaLoadElimination::ReduceLoadFromObject(Node* node,
ObjectAccess const& access) {
DCHECK(node->opcode() == IrOpcode::kLoadFromObject ||
node->opcode() == IrOpcode::kLoadImmutableFromObject);
Node* object = NodeProperties::GetValueInput(node, 0);
Node* offset = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
bool is_mutable = node->opcode() == IrOpcode::kLoadFromObject;
// We should never find a field in the wrong half-state.
DCHECK((is_mutable ? &state->immutable_state : &state->mutable_state)
->Lookup(object, offset)
.IsEmpty());
HalfState const* half_state =
is_mutable ? &state->mutable_state : &state->immutable_state;
MachineRepresentation representation = access.machine_type.representation();
FieldInfo lookup_result = state->Lookup(object, offset);
FieldInfo lookup_result = half_state->Lookup(object, offset);
if (!lookup_result.IsEmpty()) {
// Make sure we don't reuse values that were recorded with a different
// representation or resurrect dead {replacement} nodes.
@ -354,25 +362,47 @@ Reduction CsaLoadElimination::ReduceLoadFromObject(Node* node,
return Replace(replacement);
}
}
state = state->AddField(object, offset, node, representation);
half_state = half_state->AddField(object, offset, node, representation);
return UpdateState(node, state);
AbstractState const* new_state =
is_mutable
? zone()->New<AbstractState>(*half_state, state->immutable_state)
: zone()->New<AbstractState>(state->mutable_state, *half_state);
return UpdateState(node, new_state);
}
Reduction CsaLoadElimination::ReduceStoreToObject(Node* node,
ObjectAccess const& access) {
DCHECK(node->opcode() == IrOpcode::kStoreToObject ||
node->opcode() == IrOpcode::kInitializeImmutableInObject);
Node* object = NodeProperties::GetValueInput(node, 0);
Node* offset = NodeProperties::GetValueInput(node, 1);
Node* value = NodeProperties::GetValueInput(node, 2);
Node* effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
MachineRepresentation repr = access.machine_type.representation();
state = state->KillField(object, offset, repr);
state = state->AddField(object, offset, value, repr);
return UpdateState(node, state);
if (node->opcode() == IrOpcode::kStoreToObject) {
// We should not find the field in the wrong half-state.
DCHECK(state->immutable_state.Lookup(object, offset).IsEmpty());
HalfState const* mutable_state =
state->mutable_state.KillField(object, offset, repr);
mutable_state = mutable_state->AddField(object, offset, value, repr);
AbstractState const* new_state =
zone()->New<AbstractState>(*mutable_state, state->immutable_state);
return UpdateState(node, new_state);
} else {
// We should not find the field in the wrong half-state.
DCHECK(state->mutable_state.Lookup(object, offset).IsEmpty());
// We should not initialize the same immutable field twice.
DCHECK(state->immutable_state.Lookup(object, offset).IsEmpty());
HalfState const* immutable_state =
state->immutable_state.AddField(object, offset, value, repr);
AbstractState const* new_state =
zone()->New<AbstractState>(state->mutable_state, *immutable_state);
return UpdateState(node, new_state);
}
}
Reduction CsaLoadElimination::ReduceEffectPhi(Node* node) {
@ -431,10 +461,13 @@ Reduction CsaLoadElimination::ReduceOtherNode(Node* node) {
// predecessor.
if (state == nullptr) return NoChange();
// If this {node} has some uncontrolled side effects, set its state to
// {empty_state()}, otherwise to its input state.
return UpdateState(node, node->op()->HasProperty(Operator::kNoWrite)
? state
: empty_state());
// the immutable half-state of its input state, otherwise to its input
// state.
return UpdateState(
node, node->op()->HasProperty(Operator::kNoWrite)
? state
: zone()->New<AbstractState>(HalfState(zone()),
state->immutable_state));
}
DCHECK_EQ(0, node->op()->EffectOutputCount());
return NoChange();
@ -475,7 +508,8 @@ CsaLoadElimination::AbstractState const* CsaLoadElimination::ComputeLoopState(
queue.pop();
if (visited.insert(current).second) {
if (!current->op()->HasProperty(Operator::kNoWrite)) {
return empty_state();
return zone()->New<AbstractState>(HalfState(zone()),
state->immutable_state);
}
for (int i = 0; i < current->op()->EffectInputCount(); ++i) {
queue.push(NodeProperties::GetEffectInput(current, i));
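
As a side note on the KillOffset helper above: a store of
ElementSizeInBytes(repr) bytes at a constant offset must invalidate
every recorded entry whose byte range overlaps
[offset, offset + ElementSizeInBytes(repr)). A standalone sketch of
that overlap rule (simplified to a single flat map with per-entry
sizes; the real code partitions entries by object kind and also
handles unknown offsets):

// Simplified sketch of overlap-based killing; not the actual V8 code.
#include <cstdint>
#include <map>

struct Entry {
  int value_node;  // node id of the remembered field value
  uint32_t size;   // size in bytes of the stored representation
};

// Erase every entry whose byte range [key, key + size) overlaps the
// killed range [offset, offset + killed_size).
void KillOffset(std::map<uint32_t, Entry>& entries, uint32_t offset,
                uint32_t killed_size) {
  for (auto it = entries.begin(); it != entries.end();) {
    uint32_t begin = it->first;
    uint32_t end = begin + it->second.size;
    if (begin < offset + killed_size && offset < end) {
      it = entries.erase(it);
    } else {
      ++it;
    }
  }
}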


@ -62,9 +62,9 @@ class V8_EXPORT_PRIVATE CsaLoadElimination final
};
// Design doc: https://bit.ly/36MfD6Y
class AbstractState final : public ZoneObject {
class HalfState final : public ZoneObject {
public:
explicit AbstractState(Zone* zone)
explicit HalfState(Zone* zone)
: zone_(zone),
fresh_entries_(zone, InnerMap(zone)),
constant_entries_(zone, InnerMap(zone)),
@ -73,7 +73,7 @@ class V8_EXPORT_PRIVATE CsaLoadElimination final
constant_unknown_entries_(zone, InnerMap(zone)),
arbitrary_unknown_entries_(zone, InnerMap(zone)) {}
bool Equals(AbstractState const* that) const {
bool Equals(HalfState const* that) const {
return fresh_entries_ == that->fresh_entries_ &&
constant_entries_ == that->constant_entries_ &&
arbitrary_entries_ == that->arbitrary_entries_ &&
@ -81,33 +81,22 @@ class V8_EXPORT_PRIVATE CsaLoadElimination final
constant_unknown_entries_ == that->constant_unknown_entries_ &&
arbitrary_unknown_entries_ == that->arbitrary_unknown_entries_;
}
void IntersectWith(AbstractState const* that);
AbstractState const* KillField(Node* object, Node* offset,
MachineRepresentation repr) const;
AbstractState const* AddField(Node* object, Node* offset, Node* value,
MachineRepresentation repr) const;
void IntersectWith(HalfState const* that);
HalfState const* KillField(Node* object, Node* offset,
MachineRepresentation repr) const;
HalfState const* AddField(Node* object, Node* offset, Node* value,
MachineRepresentation repr) const;
FieldInfo Lookup(Node* object, Node* offset) const;
void Print() const;
private:
Zone* zone_;
using InnerMap = PersistentMap<Node*, FieldInfo>;
template <typename OuterKey>
using OuterMap = PersistentMap<OuterKey, InnerMap>;
// offset -> object -> info
using ConstantOffsetInfos = OuterMap<uint32_t>;
ConstantOffsetInfos fresh_entries_;
ConstantOffsetInfos constant_entries_;
ConstantOffsetInfos arbitrary_entries_;
// object -> offset -> info
using UnknownOffsetInfos = OuterMap<Node*>;
UnknownOffsetInfos fresh_unknown_entries_;
UnknownOffsetInfos constant_unknown_entries_;
UnknownOffsetInfos arbitrary_unknown_entries_;
// Update {map} so that {map.Get(outer_key).Get(inner_key)} returns {info}.
template <typename OuterKey>
@ -123,12 +112,43 @@ class V8_EXPORT_PRIVATE CsaLoadElimination final
MachineRepresentation repr, Zone* zone);
void KillOffsetInFresh(Node* object, uint32_t offset,
MachineRepresentation repr);
template <typename OuterKey>
static void IntersectWith(OuterMap<OuterKey>& to,
const OuterMap<OuterKey>& from);
static void Print(const ConstantOffsetInfos& infos);
static void Print(const UnknownOffsetInfos& infos);
Zone* zone_;
ConstantOffsetInfos fresh_entries_;
ConstantOffsetInfos constant_entries_;
ConstantOffsetInfos arbitrary_entries_;
UnknownOffsetInfos fresh_unknown_entries_;
UnknownOffsetInfos constant_unknown_entries_;
UnknownOffsetInfos arbitrary_unknown_entries_;
};
// An {AbstractState} consists of two {HalfState}s, representing the mutable
// and immutable sets of known fields, respectively. These sets correspond to
// LoadFromObject/StoreToObject and LoadImmutableFromObject/
// InitializeImmutableInObject respectively. The two half-states should not
// overlap.
struct AbstractState : public ZoneObject {
explicit AbstractState(Zone* zone)
: mutable_state(zone), immutable_state(zone) {}
explicit AbstractState(HalfState mutable_state, HalfState immutable_state)
: mutable_state(mutable_state), immutable_state(immutable_state) {}
bool Equals(AbstractState const* that) const {
return this->immutable_state.Equals(&that->immutable_state) &&
this->mutable_state.Equals(&that->mutable_state);
}
void IntersectWith(AbstractState const* that) {
mutable_state.IntersectWith(&that->mutable_state);
immutable_state.IntersectWith(&that->immutable_state);
}
HalfState mutable_state;
HalfState immutable_state;
};
Reduction ReduceLoadFromObject(Node* node, ObjectAccess const& access);


@ -268,6 +268,13 @@ void Int64Lowering::LowerNode(Node* node) {
MachineType::Int32(), access.write_barrier_kind)));
break;
}
case IrOpcode::kLoadImmutableFromObject: {
ObjectAccess access = ObjectAccessOf(node->op());
LowerLoadOperator(node, access.machine_type.representation(),
simplified()->LoadImmutableFromObject(ObjectAccess(
MachineType::Int32(), access.write_barrier_kind)));
break;
}
case IrOpcode::kStore: {
StoreRepresentation store_rep = StoreRepresentationOf(node->op());
LowerStoreOperator(
@ -291,6 +298,13 @@ void Int64Lowering::LowerNode(Node* node) {
MachineType::Int32(), access.write_barrier_kind)));
break;
}
case IrOpcode::kInitializeImmutableInObject: {
ObjectAccess access = ObjectAccessOf(node->op());
LowerStoreOperator(node, access.machine_type.representation(),
simplified()->InitializeImmutableInObject(ObjectAccess(
MachineType::Int32(), access.write_barrier_kind)));
break;
}
case IrOpcode::kStart: {
int parameter_count = GetParameterCountAfterLowering(signature());
// Only exchange the node if the parameter count actually changed.


@ -84,12 +84,14 @@ Reduction MemoryLowering::Reduce(Node* node) {
case IrOpcode::kAllocateRaw:
return ReduceAllocateRaw(node);
case IrOpcode::kLoadFromObject:
case IrOpcode::kLoadImmutableFromObject:
return ReduceLoadFromObject(node);
case IrOpcode::kLoadElement:
return ReduceLoadElement(node);
case IrOpcode::kLoadField:
return ReduceLoadField(node);
case IrOpcode::kStoreToObject:
case IrOpcode::kInitializeImmutableInObject:
return ReduceStoreToObject(node);
case IrOpcode::kStoreElement:
return ReduceStoreElement(node);
@ -372,7 +374,8 @@ Reduction MemoryLowering::ReduceAllocateRaw(
}
Reduction MemoryLowering::ReduceLoadFromObject(Node* node) {
DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
DCHECK(node->opcode() == IrOpcode::kLoadFromObject ||
node->opcode() == IrOpcode::kLoadImmutableFromObject);
ObjectAccess const& access = ObjectAccessOf(node->op());
MachineType machine_type = access.machine_type;
@ -492,7 +495,8 @@ Reduction MemoryLowering::ReduceLoadField(Node* node) {
Reduction MemoryLowering::ReduceStoreToObject(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode());
DCHECK(node->opcode() == IrOpcode::kStoreToObject ||
node->opcode() == IrOpcode::kInitializeImmutableInObject);
ObjectAccess const& access = ObjectAccessOf(node->op());
Node* object = node->InputAt(0);
Node* value = node->InputAt(2);


@ -37,6 +37,7 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kLoadElement:
case IrOpcode::kLoadField:
case IrOpcode::kLoadFromObject:
case IrOpcode::kLoadImmutableFromObject:
case IrOpcode::kLoadLane:
case IrOpcode::kLoadTransform:
case IrOpcode::kMemoryBarrier:
@ -53,6 +54,7 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kStoreField:
case IrOpcode::kStoreLane:
case IrOpcode::kStoreToObject:
case IrOpcode::kInitializeImmutableInObject:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
case IrOpcode::kUnreachable:
@ -217,12 +219,14 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
case IrOpcode::kCall:
return VisitCall(node, state);
case IrOpcode::kLoadFromObject:
case IrOpcode::kLoadImmutableFromObject:
return VisitLoadFromObject(node, state);
case IrOpcode::kLoadElement:
return VisitLoadElement(node, state);
case IrOpcode::kLoadField:
return VisitLoadField(node, state);
case IrOpcode::kStoreToObject:
case IrOpcode::kInitializeImmutableInObject:
return VisitStoreToObject(node, state);
case IrOpcode::kStoreElement:
return VisitStoreElement(node, state);
@ -306,7 +310,8 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
void MemoryOptimizer::VisitLoadFromObject(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
DCHECK(node->opcode() == IrOpcode::kLoadFromObject ||
node->opcode() == IrOpcode::kLoadImmutableFromObject);
Reduction reduction = memory_lowering()->ReduceLoadFromObject(node);
EnqueueUses(node, state);
if (V8_MAP_PACKING_BOOL && reduction.replacement() != node) {
@ -316,7 +321,8 @@ void MemoryOptimizer::VisitLoadFromObject(Node* node,
void MemoryOptimizer::VisitStoreToObject(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode());
DCHECK(node->opcode() == IrOpcode::kStoreToObject ||
node->opcode() == IrOpcode::kInitializeImmutableInObject);
memory_lowering()->ReduceStoreToObject(node, state);
EnqueueUses(node, state);
}


@ -426,11 +426,13 @@
V(FastApiCall) \
V(FindOrderedHashMapEntry) \
V(FindOrderedHashMapEntryForInt32Key) \
V(InitializeImmutableInObject) \
V(LoadDataViewElement) \
V(LoadElement) \
V(LoadField) \
V(LoadFieldByIndex) \
V(LoadFromObject) \
V(LoadImmutableFromObject) \
V(LoadMessage) \
V(LoadStackArgument) \
V(LoadTypedElement) \


@ -160,7 +160,9 @@ const ElementAccess& ElementAccessOf(const Operator* op) {
const ObjectAccess& ObjectAccessOf(const Operator* op) {
DCHECK_NOT_NULL(op);
DCHECK(op->opcode() == IrOpcode::kLoadFromObject ||
op->opcode() == IrOpcode::kStoreToObject);
op->opcode() == IrOpcode::kLoadImmutableFromObject ||
op->opcode() == IrOpcode::kStoreToObject ||
op->opcode() == IrOpcode::kInitializeImmutableInObject);
return OpParameter<ObjectAccess>(op);
}
@ -1878,16 +1880,18 @@ const Operator* SimplifiedOperatorBuilder::SpeculativeNumberEqual(
UNREACHABLE();
}
#define ACCESS_OP_LIST(V) \
V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1, 1) \
V(StoreField, FieldAccess, Operator::kNoRead, 2, 1, 0) \
V(LoadElement, ElementAccess, Operator::kNoWrite, 2, 1, 1) \
V(StoreElement, ElementAccess, Operator::kNoRead, 3, 1, 0) \
V(LoadTypedElement, ExternalArrayType, Operator::kNoWrite, 4, 1, 1) \
V(LoadFromObject, ObjectAccess, Operator::kNoWrite, 2, 1, 1) \
V(StoreTypedElement, ExternalArrayType, Operator::kNoRead, 5, 1, 0) \
V(StoreToObject, ObjectAccess, Operator::kNoRead, 3, 1, 0) \
V(LoadDataViewElement, ExternalArrayType, Operator::kNoWrite, 4, 1, 1) \
#define ACCESS_OP_LIST(V) \
V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1, 1) \
V(StoreField, FieldAccess, Operator::kNoRead, 2, 1, 0) \
V(LoadElement, ElementAccess, Operator::kNoWrite, 2, 1, 1) \
V(StoreElement, ElementAccess, Operator::kNoRead, 3, 1, 0) \
V(LoadTypedElement, ExternalArrayType, Operator::kNoWrite, 4, 1, 1) \
V(StoreTypedElement, ExternalArrayType, Operator::kNoRead, 5, 1, 0) \
V(LoadFromObject, ObjectAccess, Operator::kNoWrite, 2, 1, 1) \
V(StoreToObject, ObjectAccess, Operator::kNoRead, 3, 1, 0) \
V(LoadImmutableFromObject, ObjectAccess, Operator::kNoWrite, 2, 1, 1) \
V(InitializeImmutableInObject, ObjectAccess, Operator::kNoRead, 3, 1, 0) \
V(LoadDataViewElement, ExternalArrayType, Operator::kNoWrite, 4, 1, 1) \
V(StoreDataViewElement, ExternalArrayType, Operator::kNoRead, 5, 1, 0)
#define ACCESS(Name, Type, properties, value_input_count, control_input_count, \


@ -1068,10 +1068,22 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
Type value_type);
// load-from-object [base + offset]
// This operator comes in two flavors: LoadImmutableFromObject guarantees that
// the underlying object field will be initialized at most once for the
// duration of the program. This enables more optimizations in
// CsaLoadElimination.
// Note: LoadImmutableFromObject is unrelated to LoadImmutable and is lowered
// into a regular Load.
const Operator* LoadFromObject(ObjectAccess const&);
const Operator* LoadImmutableFromObject(ObjectAccess const&);
// store-to-object [base + offset], value
// This operator comes in two flavors: InitializeImmutableInObject guarantees
// that the underlying object field has not and will not be initialized again
// for the duration of the program. This enables more optimizations in
// CsaLoadElimination.
const Operator* StoreToObject(ObjectAccess const&);
const Operator* InitializeImmutableInObject(ObjectAccess const&);
// load-typed-element buffer, [base + external + index]
const Operator* LoadTypedElement(ExternalArrayType const&);


@ -2174,6 +2174,7 @@ Type Typer::Visitor::TypeLoadStackArgument(Node* node) {
}
Type Typer::Visitor::TypeLoadFromObject(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeLoadImmutableFromObject(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeLoadTypedElement(Node* node) {
switch (ExternalArrayTypeOf(node->op())) {
@ -2204,6 +2205,9 @@ Type Typer::Visitor::TypeStoreMessage(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeStoreElement(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeStoreToObject(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeInitializeImmutableInObject(Node* node) {
UNREACHABLE();
}
Type Typer::Visitor::TypeTransitionAndStoreElement(Node* node) {
UNREACHABLE();


@ -1562,6 +1562,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// CheckTypeIs(node, ElementAccessOf(node->op()).type));
break;
case IrOpcode::kLoadFromObject:
case IrOpcode::kLoadImmutableFromObject:
CheckValueInputIs(node, 0, Type::Receiver());
break;
case IrOpcode::kLoadTypedElement:
@ -1584,6 +1585,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckNotTyped(node);
break;
case IrOpcode::kStoreToObject:
case IrOpcode::kInitializeImmutableInObject:
// TODO(gsps): Can we check some types here?
break;
case IrOpcode::kTransitionAndStoreElement:


@ -84,28 +84,20 @@ MachineType assert_size(int expected_size, MachineType type) {
assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type), GetInstance(), \
wasm::ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset))
// TODO(11510): Using LoadImmutable for tagged values causes registers to be
// spilled and added to the safepoint table, resulting in large code size
// regressions. A possible solution would be to not spill the register at all,
// but rather reload the value from memory. This will require non-trivial
// changes in the register allocator and instruction selector.
#define LOAD_INSTANCE_FIELD(name, type) \
(CanBeTaggedOrCompressedPointer((type).representation()) \
? LOAD_MUTABLE_INSTANCE_FIELD(name, type) \
: gasm_->LoadImmutable( \
assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type), \
GetInstance(), \
wasm::ObjectAccess::ToTagged( \
WasmInstanceObject::k##name##Offset)))
#define LOAD_INSTANCE_FIELD(name, type) \
gasm_->LoadImmutableFromObject( \
assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type), GetInstance(), \
wasm::ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset))
#define LOAD_ROOT(root_name, factory_name) \
(parameter_mode_ == kNoSpecialParameterMode \
? graph()->NewNode(mcgraph()->common()->HeapConstant( \
isolate_->factory()->factory_name())) \
: gasm_->LoadImmutable(/* Root pointers do not get compressed. */ \
MachineType::Pointer(), BuildLoadIsolateRoot(), \
IsolateData::root_slot_offset( \
RootIndex::k##root_name)))
// Use MachineType::Pointer() over Tagged() to load root pointers because they
// do not get compressed.
#define LOAD_ROOT(root_name, factory_name) \
(parameter_mode_ == kNoSpecialParameterMode \
? graph()->NewNode(mcgraph()->common()->HeapConstant( \
isolate_->factory()->factory_name())) \
: gasm_->LoadImmutableFromObject( \
MachineType::Pointer(), BuildLoadIsolateRoot(), \
IsolateData::root_slot_offset(RootIndex::k##root_name)))
bool ContainsSimd(const wasm::FunctionSig* sig) {
for (auto type : sig->all()) {
@ -263,6 +255,16 @@ class WasmGraphAssembler : public GraphAssembler {
return LoadFromObject(type, base, IntPtrConstant(offset));
}
Node* LoadImmutableFromObject(MachineType type, Node* base, Node* offset) {
return AddNode(graph()->NewNode(simplified_.LoadImmutableFromObject(
ObjectAccess(type, kNoWriteBarrier)),
base, offset, effect(), control()));
}
Node* LoadImmutableFromObject(MachineType type, Node* base, int offset) {
return LoadImmutableFromObject(type, base, IntPtrConstant(offset));
}
Node* LoadImmutable(LoadRepresentation rep, Node* base, Node* offset) {
return AddNode(graph()->NewNode(mcgraph()->machine()->LoadImmutable(rep),
base, offset));
@ -283,6 +285,19 @@ class WasmGraphAssembler : public GraphAssembler {
return StoreToObject(access, base, IntPtrConstant(offset), value);
}
Node* InitializeImmutableInObject(ObjectAccess access, Node* base,
Node* offset, Node* value) {
return AddNode(
graph()->NewNode(simplified_.InitializeImmutableInObject(access), base,
offset, value, effect(), control()));
}
Node* InitializeImmutableInObject(ObjectAccess access, Node* base, int offset,
Node* value) {
return InitializeImmutableInObject(access, base, IntPtrConstant(offset),
value);
}
Node* IsI31(Node* object) {
if (COMPRESS_POINTERS_BOOL) {
return Word32Equal(Word32And(object, Int32Constant(kSmiTagMask)),
@ -295,8 +310,9 @@ class WasmGraphAssembler : public GraphAssembler {
// Maps and their contents.
Node* LoadMap(Node* object) {
Node* map_word = LoadFromObject(MachineType::TaggedPointer(), object,
HeapObject::kMapOffset - kHeapObjectTag);
Node* map_word =
LoadImmutableFromObject(MachineType::TaggedPointer(), object,
HeapObject::kMapOffset - kHeapObjectTag);
#ifdef V8_MAP_PACKING
return UnpackMapWord(map_word);
#else
@ -309,23 +325,23 @@ class WasmGraphAssembler : public GraphAssembler {
#ifdef V8_MAP_PACKING
map = PackMapWord(TNode<Map>::UncheckedCast(map));
#endif
StoreToObject(access, heap_object, HeapObject::kMapOffset - kHeapObjectTag,
map);
InitializeImmutableInObject(access, heap_object,
HeapObject::kMapOffset - kHeapObjectTag, map);
}
Node* LoadInstanceType(Node* map) {
return LoadFromObject(
return LoadImmutableFromObject(
MachineType::Uint16(), map,
wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset));
}
Node* LoadWasmTypeInfo(Node* map) {
int offset = Map::kConstructorOrBackPointerOrNativeContextOffset;
return LoadFromObject(MachineType::TaggedPointer(), map,
wasm::ObjectAccess::ToTagged(offset));
return LoadImmutableFromObject(MachineType::TaggedPointer(), map,
wasm::ObjectAccess::ToTagged(offset));
}
Node* LoadSupertypes(Node* wasm_type_info) {
return LoadFromObject(
return LoadImmutableFromObject(
MachineType::TaggedPointer(), wasm_type_info,
wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset));
}
@ -333,7 +349,7 @@ class WasmGraphAssembler : public GraphAssembler {
// FixedArrays.
Node* LoadFixedArrayLengthAsSmi(Node* fixed_array) {
return LoadFromObject(
return LoadImmutableFromObject(
MachineType::TaggedSigned(), fixed_array,
wasm::ObjectAccess::ToTagged(FixedArray::kLengthOffset));
}
@ -346,6 +362,15 @@ class WasmGraphAssembler : public GraphAssembler {
return LoadFromObject(type, fixed_array, offset);
}
Node* LoadImmutableFixedArrayElement(
Node* fixed_array, Node* index_intptr,
MachineType type = MachineType::AnyTagged()) {
Node* offset = IntAdd(
IntMul(index_intptr, IntPtrConstant(kTaggedSize)),
IntPtrConstant(wasm::ObjectAccess::ToTagged(FixedArray::kHeaderSize)));
return LoadImmutableFromObject(type, fixed_array, offset);
}
Node* LoadFixedArrayElement(Node* array, int index, MachineType type) {
return LoadFromObject(
type, array,
@ -404,14 +429,16 @@ class WasmGraphAssembler : public GraphAssembler {
}
Node* LoadExportedFunctionIndexAsSmi(Node* exported_function_data) {
return LoadFromObject(MachineType::TaggedSigned(), exported_function_data,
wasm::ObjectAccess::ToTagged(
WasmExportedFunctionData::kFunctionIndexOffset));
return LoadImmutableFromObject(
MachineType::TaggedSigned(), exported_function_data,
wasm::ObjectAccess::ToTagged(
WasmExportedFunctionData::kFunctionIndexOffset));
}
Node* LoadExportedFunctionInstance(Node* exported_function_data) {
return LoadFromObject(MachineType::TaggedPointer(), exported_function_data,
wasm::ObjectAccess::ToTagged(
WasmExportedFunctionData::kInstanceOffset));
return LoadImmutableFromObject(
MachineType::TaggedPointer(), exported_function_data,
wasm::ObjectAccess::ToTagged(
WasmExportedFunctionData::kInstanceOffset));
}
// JavaScript objects.
@ -431,8 +458,13 @@ class WasmGraphAssembler : public GraphAssembler {
Node* StoreStructField(Node* struct_object, const wasm::StructType* type,
uint32_t field_index, Node* value) {
return StoreToObject(ObjectAccessForGCStores(type->field(field_index)),
struct_object, FieldOffset(type, field_index), value);
ObjectAccess access = ObjectAccessForGCStores(type->field(field_index));
return type->mutability(field_index)
? StoreToObject(access, struct_object,
FieldOffset(type, field_index), value)
: InitializeImmutableInObject(access, struct_object,
FieldOffset(type, field_index),
value);
}
Node* WasmArrayElementOffset(Node* index, wasm::ValueType element_type) {
@ -445,7 +477,7 @@ class WasmGraphAssembler : public GraphAssembler {
}
Node* LoadWasmArrayLength(Node* array) {
return LoadFromObject(
return LoadImmutableFromObject(
MachineType::Uint32(), array,
wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset));
}
@ -3029,7 +3061,7 @@ Node* WasmGraphBuilder::BuildImportCall(const wasm::FunctionSig* sig,
func_index_intptr, gasm_->IntPtrConstant(kSystemPointerSize));
Node* imported_targets =
LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::Pointer());
Node* target_node = gasm_->LoadFromObject(
Node* target_node = gasm_->LoadImmutableFromObject(
MachineType::Pointer(), imported_targets, func_index_times_pointersize);
args[0] = target_node;
@ -3229,7 +3261,7 @@ Node* WasmGraphBuilder::BuildCallRef(const wasm::FunctionSig* real_sig,
auto load_target = gasm_->MakeLabel();
auto end_label = gasm_->MakeLabel(MachineType::PointerRepresentation());
Node* ref_node = gasm_->LoadFromObject(
Node* ref_node = gasm_->LoadImmutableFromObject(
MachineType::TaggedPointer(), function,
wasm::ObjectAccess::ToTagged(WasmInternalFunction::kRefOffset));
@ -3244,16 +3276,16 @@ Node* WasmGraphBuilder::BuildCallRef(const wasm::FunctionSig* real_sig,
{
// Compute the call target from the (on-heap) wrapper code. The cached
// target can only be null for WasmJSFunctions.
Node* wrapper_code = gasm_->LoadFromObject(
Node* wrapper_code = gasm_->LoadImmutableFromObject(
MachineType::TaggedPointer(), function,
wasm::ObjectAccess::ToTagged(WasmInternalFunction::kCodeOffset));
Node* call_target;
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
CHECK(!V8_HEAP_SANDBOX_BOOL); // Not supported yet.
call_target =
gasm_->LoadFromObject(MachineType::Pointer(), wrapper_code,
wasm::ObjectAccess::ToTagged(
CodeDataContainer::kCodeEntryPointOffset));
call_target = gasm_->LoadImmutableFromObject(
MachineType::Pointer(), wrapper_code,
wasm::ObjectAccess::ToTagged(
CodeDataContainer::kCodeEntryPointOffset));
} else {
call_target = gasm_->IntAdd(
@ -3278,7 +3310,7 @@ void WasmGraphBuilder::CompareToInternalFunctionAtIndex(
Node** failure_control) {
// Since we are comparing to a function reference, it is guaranteed that
// instance->wasm_internal_functions() has been initialized.
Node* internal_functions = gasm_->LoadFromObject(
Node* internal_functions = gasm_->LoadImmutableFromObject(
MachineType::TaggedPointer(), GetInstance(),
wasm::ObjectAccess::ToTagged(
WasmInstanceObject::kWasmInternalFunctionsOffset));
@ -5680,7 +5712,7 @@ Node* WasmGraphBuilder::StructNewWithRtt(uint32_t struct_index,
int size = WasmStruct::Size(type);
Node* s = gasm_->Allocate(size);
gasm_->StoreMap(s, rtt);
gasm_->StoreToObject(
gasm_->InitializeImmutableInObject(
ObjectAccess(MachineType::TaggedPointer(), kNoWriteBarrier), s,
wasm::ObjectAccess::ToTagged(JSReceiver::kPropertiesOrHashOffset),
LOAD_ROOT(EmptyFixedArray, empty_fixed_array));
@ -5765,8 +5797,13 @@ Node* WasmGraphBuilder::ArrayInit(uint32_t array_index,
for (int i = 0; i < static_cast<int>(elements.size()); i++) {
Node* offset =
gasm_->WasmArrayElementOffset(Int32Constant(i), element_type);
gasm_->StoreToObject(ObjectAccessForGCStores(element_type), array, offset,
elements[i]);
if (type->mutability()) {
gasm_->StoreToObject(ObjectAccessForGCStores(element_type), array, offset,
elements[i]);
} else {
gasm_->InitializeImmutableInObject(ObjectAccessForGCStores(element_type),
array, offset, elements[i]);
}
}
return array;
}
@ -5774,7 +5811,9 @@ Node* WasmGraphBuilder::ArrayInit(uint32_t array_index,
Node* WasmGraphBuilder::RttCanon(uint32_t type_index) {
Node* maps_list =
LOAD_INSTANCE_FIELD(ManagedObjectMaps, MachineType::TaggedPointer());
return gasm_->LoadFixedArrayElementPtr(maps_list, type_index);
return gasm_->LoadImmutableFromObject(
MachineType::TaggedPointer(), maps_list,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(type_index));
}
Node* WasmGraphBuilder::RttSub(uint32_t type_index, Node* parent_rtt,
@ -5891,7 +5930,7 @@ void WasmGraphBuilder::TypeCheck(
callbacks.fail_if_not(gasm_->UintLessThan(rtt_depth, supertypes_length),
BranchHint::kTrue);
}
Node* maybe_match = gasm_->LoadFixedArrayElement(
Node* maybe_match = gasm_->LoadImmutableFixedArrayElement(
supertypes, rtt_depth, MachineType::TaggedPointer());
callbacks.fail_if_not(gasm_->TaggedEqual(maybe_match, rtt),
@ -6069,7 +6108,10 @@ Node* WasmGraphBuilder::StructGet(Node* struct_object,
MachineType machine_type = MachineType::TypeForRepresentation(
struct_type->field(field_index).machine_representation(), is_signed);
Node* offset = gasm_->FieldOffset(struct_type, field_index);
return gasm_->LoadFromObject(machine_type, struct_object, offset);
return struct_type->mutability(field_index)
? gasm_->LoadFromObject(machine_type, struct_object, offset)
: gasm_->LoadImmutableFromObject(machine_type, struct_object,
offset);
}
void WasmGraphBuilder::StructSet(Node* struct_object,
@ -6114,7 +6156,10 @@ Node* WasmGraphBuilder::ArrayGet(Node* array_object,
MachineType machine_type = MachineType::TypeForRepresentation(
type->element_type().machine_representation(), is_signed);
Node* offset = gasm_->WasmArrayElementOffset(index, type->element_type());
return gasm_->LoadFromObject(machine_type, array_object, offset);
return type->mutability()
? gasm_->LoadFromObject(machine_type, array_object, offset)
: gasm_->LoadImmutableFromObject(machine_type, array_object,
offset);
}
void WasmGraphBuilder::ArraySet(Node* array_object, const wasm::ArrayType* type,


@ -29,8 +29,8 @@ Reduction WasmEscapeAnalysis::ReduceAllocateRaw(Node* node) {
for (Edge edge : node->use_edges()) {
if (NodeProperties::IsValueEdge(edge)) {
if (edge.index() != 0 ||
edge.from()->opcode() != IrOpcode::kStoreToObject) {
// The allocated object is used for something other than storing into.
(edge.from()->opcode() != IrOpcode::kStoreToObject &&
edge.from()->opcode() != IrOpcode::kInitializeImmutableInObject)) {
return NoChange();
}
value_edges.push_back(edge);
@ -43,7 +43,8 @@ Reduction WasmEscapeAnalysis::ReduceAllocateRaw(Node* node) {
DCHECK_EQ(edge.index(), 0);
Node* use = edge.from();
DCHECK(!use->IsDead());
DCHECK_EQ(use->opcode(), IrOpcode::kStoreToObject);
DCHECK(use->opcode() == IrOpcode::kStoreToObject ||
use->opcode() == IrOpcode::kInitializeImmutableInObject);
// The value stored by this StoreToObject node might be another allocation
// which has no more uses. Therefore we have to revisit it. Note that this
// will not happen automatically: ReplaceWithValue does not trigger revisits


@ -782,6 +782,11 @@ Map HeapObject::map(PtrComprCageBase cage_base) const {
}
void HeapObject::set_map(Map value) {
#if V8_ENABLE_WEBASSEMBLY
// In {WasmGraphBuilder::SetMap} and {WasmGraphBuilder::LoadMap}, we treat
// maps as immutable. Therefore we are not allowed to mutate them here.
DCHECK(!value.IsWasmStructMap() && !value.IsWasmArrayMap());
#endif
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && !value.is_null()) {
GetHeapFromWritableObject(*this)->VerifyObjectLayoutChange(*this, value);


@ -0,0 +1,136 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --experimental-wasm-gc --no-liftoff --experimental-wasm-nn-locals
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
// Test that comparisons with array length in a loop get optimized away.
(function ArrayLoopOptimizationTest() {
var builder = new WasmModuleBuilder();
var array_index = builder.addArray(kWasmI32, true);
// Increase these parameters to measure performance.
let array_size = 10; // 100000000;
let iterations = 1; // 50;
builder.addFunction("array_inc", kSig_v_v)
.addLocals(wasmRefType(array_index), 1)
.addLocals(kWasmI32, 2)
// Locals: 0 -> array, 1 -> length, 2 -> index
.addBody([
...wasmI32Const(array_size),
kExprCallFunction, 1,
kExprLocalSet, 0,
// length = array.length
kExprLocalGet, 0,
kGCPrefix, kExprArrayLen, array_index,
kExprLocalSet, 1,
// while (true) {
kExprLoop, kWasmVoid,
// if (index < length) {
kExprLocalGet, 2,
kExprLocalGet, 1,
kExprI32LtU,
kExprIf, kWasmVoid,
// array[index] = array[index] + 5;
kExprLocalGet, 0,
kExprLocalGet, 2,
kExprLocalGet, 0,
kExprLocalGet, 2,
kGCPrefix, kExprArrayGet, array_index,
kExprI32Const, 5,
kExprI32Add,
kGCPrefix, kExprArraySet, array_index,
// index = index + 1;
kExprLocalGet, 2,
kExprI32Const, 1,
kExprI32Add,
kExprLocalSet, 2,
// continue;
kExprBr, 1,
// }
// break;
kExprEnd,
// }
kExprEnd])
.exportFunc();
builder.addFunction("make_array",
makeSig([kWasmI32], [wasmRefType(array_index)]))
.addBody([kExprLocalGet, 0, kGCPrefix, kExprArrayNewDefault, array_index])
var instance = builder.instantiate({});
let before = Date.now();
for (let i = 0; i < iterations; i++) {
instance.exports.array_inc();
}
let after = Date.now();
print(
"Average of " + iterations + " runs: " +
(after - before)/iterations + "ms");
})();
(function ImmutableLoadThroughEffect() {
var builder = new WasmModuleBuilder();
var struct = builder.addStructSubtype([
makeField(kWasmI32, false), makeField(kWasmI32, true)]);
let effect = builder.addImport('m', 'f', kSig_v_v);
builder.addFunction("main", kSig_i_i)
.addLocals(wasmRefType(struct), 1)
.addBody([
// Initialize an object
kExprLocalGet, 0,
kExprLocalGet, 0, kExprI32Const, 1, kExprI32Add,
kGCPrefix, kExprStructNew, struct,
kExprLocalSet, 1,
// Introduce unknown effect
kExprCallFunction, effect,
// TF should be able to eliminate this load...
kExprLocalGet, 1,
kGCPrefix, kExprStructGet, struct, 0,
// ... but not this one.
kExprLocalGet, 1,
kGCPrefix, kExprStructGet, struct, 1,
kExprI32Add
])
.exportFunc();
var instance = builder.instantiate({m : { f: function () {} }});
assertEquals(85, instance.exports.main(42));
})();
(function FunctionTypeCheckThroughEffect() {
var builder = new WasmModuleBuilder();
var sig = builder.addType(kSig_i_i);
let effect = builder.addImport('m', 'f', kSig_v_v);
builder.addFunction("input", sig)
.addBody([kExprLocalGet, 0])
.exportFunc();
builder.addFunction("main", makeSig([wasmRefType(kWasmFuncRef)], [kWasmI32]))
.addBody([
// Type check the function
kExprLocalGet, 0, kGCPrefix, kExprRttCanon, sig, kGCPrefix, kExprRefCast,
kExprDrop,
// Introduce unknown effect
kExprCallFunction, effect,
// TF should be able to eliminate the second type check, and return the
// constant 1.
kExprLocalGet, 0, kGCPrefix, kExprRttCanon, sig,
kGCPrefix, kExprRefTest])
.exportFunc();
var instance = builder.instantiate({m : { f: function () {} }});
assertEquals(1, instance.exports.main(instance.exports.input));
})();