Revert "[wasm][liftoff] Spill regs for multi-value merges"

This reverts commit d9e1f2aee5.

Reason for revert: Linux test failures: https://ci.chromium.org/ui/p/v8/builders/ci/V8%20Linux/45960/overview

Original change's description:
> [wasm][liftoff] Spill regs for multi-value merges
>
> If there is more than one value in the merge region, a stack-to-stack
> move can overwrite the source of a stack-to-register move. To avoid
> this, spill all registers.
>
> R=clemensb@chromium.org
>
> Bug: chromium:1299183
> Change-Id: I10495434d0a18c9072ee3882e00a687edd8c592a
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3523044
> Reviewed-by: Clemens Backes <clemensb@chromium.org>
> Commit-Queue: Thibaud Michaud <thibaudm@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#79584}

Bug: chromium:1299183
Change-Id: I465129695cfc1c5678923f7eefe5b91e31383798
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3546745
Auto-Submit: Shu-yu Guo <syg@chromium.org>
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Owners-Override: Shu-yu Guo <syg@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79585}
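The interference described in the reverted change can be pictured with a minimal standalone C++ sketch (illustrative only, not V8 code; the two-slot frame and the move order are assumptions made for the example):

// Illustrative sketch only (not V8 code): when a merge region holds more than
// one value, a stack-to-stack move executed before a stack-to-register reload
// can clobber the slot the reload reads from.
#include <cassert>
#include <vector>

int main() {
  // Hypothetical source frame: two merge values live in stack slots {10, 20}.
  std::vector<int> frame = {10, 20};

  // Assumed target layout: value 0 moves stack-to-stack into slot 1,
  // value 1 is reloaded from slot 1 into a register.
  frame[1] = frame[0];  // stack-to-stack move overwrites slot 1
  int reg = frame[1];   // stack-to-register reload now reads the moved value

  assert(reg == 10);    // 20 was expected; the reload's source was clobbered
  return 0;
}

Spilling all registers in such a merge region, as the reverted change did, sidesteps the problem by removing the stack-to-register reloads entirely.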
commit 7566979213
parent d9e1f2aee5
Author: Shu-yu Guo <syg@chromium.org>
Date: 2022-03-23 19:33:36 +00:00, committed by V8 LUCI CQ

3 changed files with 25 additions and 268 deletions


@@ -1164,22 +1164,12 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
   DCHECK_EQ(0, element_size_bytes(kind) % kSystemPointerSize);
   int words = element_size_bytes(kind) / kSystemPointerSize;
   DCHECK_LE(1, words);
-  // Make sure we move the words in the correct order in case there is an
-  // overlap between src and dst.
-  if (src_offset < dst_offset) {
-    do {
-      liftoff::MoveStackValue(this, liftoff::GetStackSlot(src_offset),
-                              liftoff::GetStackSlot(dst_offset));
-      dst_offset -= kSystemPointerSize;
-      src_offset -= kSystemPointerSize;
-    } while (--words);
-  } else {
-    while (words--) {
-      liftoff::MoveStackValue(
-          this, liftoff::GetStackSlot(src_offset - words * kSystemPointerSize),
-          liftoff::GetStackSlot(dst_offset - words * kSystemPointerSize));
-    }
-  }
+  do {
+    liftoff::MoveStackValue(this, liftoff::GetStackSlot(src_offset),
+                            liftoff::GetStackSlot(dst_offset));
+    dst_offset -= kSystemPointerSize;
+    src_offset -= kSystemPointerSize;
+  } while (--words);
 }
 
 void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {

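The overlap handling removed in the hunk above follows the usual rule for copying a multi-word region onto itself: pick the copy direction so that no word is overwritten before it has been read. A small standalone C++ sketch of that rule (illustrative only; the slot numbering is an assumption, not Liftoff's offset convention):

// Illustrative sketch only (not V8 code): copying overlapping word ranges
// needs a direction check, otherwise a source word is clobbered mid-copy.
#include <cassert>
#include <cstring>

int main() {
  // Move the two words at slots 0..1 to slots 1..2 within the same array.
  int naive[4] = {1, 2, 3, 4};
  naive[1] = naive[0];
  naive[2] = naive[1];   // reads slot 1 after it was already overwritten
  assert(naive[2] == 1); // expected 2; the forward copy corrupted the data

  // Copying from the far end first (what memmove does) handles the overlap.
  int slots[4] = {1, 2, 3, 4};
  std::memmove(&slots[1], &slots[0], 2 * sizeof(int));
  assert(slots[1] == 1 && slots[2] == 2);
  return 0;
}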

@@ -391,10 +391,6 @@ enum MergeAllowConstants : bool {
   kConstantsAllowed = true,
   kConstantsNotAllowed = false
 };
-enum MergeAllowRegisters : bool {
-  kRegistersAllowed = true,
-  kRegistersNotAllowed = false
-};
 enum ReuseRegisters : bool {
   kReuseRegisters = true,
   kNoReuseRegisters = false
@@ -403,7 +399,6 @@ void InitMergeRegion(LiftoffAssembler::CacheState* state,
                      const VarState* source, VarState* target, uint32_t count,
                      MergeKeepStackSlots keep_stack_slots,
                      MergeAllowConstants allow_constants,
-                     MergeAllowRegisters allow_registers,
                      ReuseRegisters reuse_registers, LiftoffRegList used_regs) {
   RegisterReuseMap register_reuse_map;
   for (const VarState* source_end = source + count; source < source_end;
@@ -414,21 +409,18 @@ void InitMergeRegion(LiftoffAssembler::CacheState* state,
       continue;
     }
     base::Optional<LiftoffRegister> reg;
-    if (allow_registers) {
-      // First try: Keep the same register, if it's free.
-      if (source->is_reg() && state->is_free(source->reg())) {
-        reg = source->reg();
-      }
-      // Second try: Use the same register we used before (if we reuse
-      // registers).
-      if (!reg && reuse_registers) {
-        reg = register_reuse_map.Lookup(source->reg());
-      }
-      // Third try: Use any free register.
-      RegClass rc = reg_class_for(source->kind());
-      if (!reg && state->has_unused_register(rc, used_regs)) {
-        reg = state->unused_register(rc, used_regs);
-      }
+    // First try: Keep the same register, if it's free.
+    if (source->is_reg() && state->is_free(source->reg())) {
+      reg = source->reg();
+    }
+    // Second try: Use the same register we used before (if we reuse registers).
+    if (!reg && reuse_registers) {
+      reg = register_reuse_map.Lookup(source->reg());
+    }
+    // Third try: Use any free register.
+    RegClass rc = reg_class_for(source->kind());
+    if (!reg && state->has_unused_register(rc, used_regs)) {
+      reg = state->unused_register(rc, used_regs);
     }
     if (!reg) {
       // No free register; make this a stack slot.
@@ -477,17 +469,9 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
   for (auto& src : base::VectorOf(source_begin, num_locals)) {
     if (src.is_reg()) used_regs.set(src.reg());
   }
-  // If there is more than one operand in the merge region, a stack-to-stack
-  // move can interfere with a register reload, which would not be handled
-  // correctly by the StackTransferRecipe. To avoid this, spill all registers in
-  // this region.
-  MergeAllowRegisters allow_registers =
-      arity <= 1 ? kRegistersAllowed : kRegistersNotAllowed;
-  if (allow_registers) {
-    for (auto& src :
-         base::VectorOf(source_begin + stack_base + discarded, arity)) {
-      if (src.is_reg()) used_regs.set(src.reg());
-    }
+  for (auto& src :
+       base::VectorOf(source_begin + stack_base + discarded, arity)) {
+    if (src.is_reg()) used_regs.set(src.reg());
   }
 
   // Initialize the merge region. If this region moves, try to turn stack slots
@@ -496,8 +480,7 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
       discarded == 0 ? kKeepStackSlots : kTurnStackSlotsIntoRegisters;
   InitMergeRegion(this, source_begin + stack_base + discarded,
                   target_begin + stack_base, arity, keep_merge_stack_slots,
-                  kConstantsNotAllowed, allow_registers, kNoReuseRegisters,
-                  used_regs);
+                  kConstantsNotAllowed, kNoReuseRegisters, used_regs);
   // Shift spill offsets down to keep slots contiguous.
   int offset = stack_base == 0 ? StaticStackFrameSize()
                                : source.stack_state[stack_base - 1].offset();
@@ -510,8 +493,7 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
   // Initialize the locals region. Here, stack slots stay stack slots (because
   // they do not move). Try to keep register in registers, but avoid duplicates.
   InitMergeRegion(this, source_begin, target_begin, num_locals, kKeepStackSlots,
-                  kConstantsNotAllowed, kRegistersAllowed, kNoReuseRegisters,
-                  used_regs);
+                  kConstantsNotAllowed, kNoReuseRegisters, used_regs);
 
   // Consistency check: All the {used_regs} are really in use now.
   DCHECK_EQ(used_regs, used_registers & used_regs);
@@ -521,7 +503,7 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
   // source region, ensure to use the same register twice in the target region.
   InitMergeRegion(this, source_begin + num_locals, target_begin + num_locals,
                   stack_depth, kKeepStackSlots, kConstantsAllowed,
-                  kRegistersAllowed, kReuseRegisters, used_regs);
+                  kReuseRegisters, used_regs);
 }
 
 void LiftoffAssembler::CacheState::Steal(const CacheState& source) {
@@ -803,7 +785,7 @@ void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity,
       transfers.TransferStackSlot(target.stack_state[target_stack_base + i],
                                   cache_state_.stack_state[stack_base + i]);
       DCHECK(!SlotInterference(
-          target.stack_state[target_stack_base + i],
+          target.stack_state[i],
           base::VectorOf(cache_state_.stack_state.data() + stack_base + i + 1,
                          arity - i - 1)));
     }


@@ -1,215 +0,0 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --wasm-staging --experimental-wasm-gc
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
builder.addStruct([]);
builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]));
builder.addType(makeSig([kWasmAnyRef, kWasmFuncRef, kWasmExternRef], [wasmRefType(0)]));
builder.addType(makeSig([kWasmI64, kWasmF32, kWasmS128, kWasmI32], [wasmRefType(1), wasmOptRefType(2), kWasmI64, wasmOptRefType(2), kWasmI64]));
builder.addType(makeSig([], [wasmOptRefType(2), wasmOptRefType(2), kWasmF64, wasmOptRefType(2), kWasmI32, wasmOptRefType(2), kWasmI32, kWasmI32, wasmOptRefType(2), kWasmI32, kWasmI32, kWasmI64, kWasmI32, kWasmS128, wasmOptRefType(2)]));
builder.addType(makeSig([], []));
builder.addType(makeSig([wasmRefType(kWasmAnyRef)], [kWasmI32, kWasmI32, wasmRefType(1), wasmRefType(kWasmAnyRef), kWasmI32, wasmRefType(1), kWasmI64, wasmOptRefType(4), kWasmI32, wasmRefType(kWasmAnyRef), wasmOptRefType(4), kWasmI64, kWasmI64, wasmRefType(kWasmEqRef), kWasmI32]));
builder.addType(makeSig([wasmRefType(kWasmEqRef), kWasmAnyRef, kWasmI32, kWasmI32], [wasmRefType(1), kWasmI64, wasmOptRefType(4), kWasmI32, wasmRefType(kWasmAnyRef), wasmOptRefType(4), kWasmI64, kWasmI64, wasmRefType(kWasmEqRef), kWasmI32]));
builder.addType(makeSig([kWasmI32, kWasmI32, wasmRefType(1), wasmRefType(kWasmAnyRef), kWasmI32, wasmRefType(1), kWasmI64, wasmOptRefType(4), kWasmI32, wasmRefType(kWasmAnyRef), wasmOptRefType(4), kWasmI64, kWasmI64, wasmRefType(kWasmEqRef), kWasmI32], [kWasmI32]));
builder.addMemory(16, 32, false);
builder.addTable(kWasmFuncRef, 4, 5, undefined)
builder.addTable(kWasmFuncRef, 15, 25, undefined)
builder.addTable(kWasmFuncRef, 1, 1, undefined)
builder.addTable(kWasmFuncRef, 16, 17, undefined)
builder.addActiveElementSegment(0, WasmInitExpr.I32Const(0), [WasmInitExpr.RefFunc(0), WasmInitExpr.RefFunc(1), WasmInitExpr.RefFunc(2), WasmInitExpr.RefFunc(3)], kWasmFuncRef);
builder.addActiveElementSegment(1, WasmInitExpr.I32Const(0), [WasmInitExpr.RefFunc(0), WasmInitExpr.RefFunc(1), WasmInitExpr.RefFunc(2), WasmInitExpr.RefFunc(3), WasmInitExpr.RefFunc(0), WasmInitExpr.RefFunc(1), WasmInitExpr.RefFunc(2), WasmInitExpr.RefFunc(3), WasmInitExpr.RefFunc(0), WasmInitExpr.RefFunc(1), WasmInitExpr.RefFunc(2), WasmInitExpr.RefFunc(3), WasmInitExpr.RefFunc(0), WasmInitExpr.RefFunc(1), WasmInitExpr.RefFunc(2)], kWasmFuncRef);
builder.addActiveElementSegment(2, WasmInitExpr.I32Const(0), [WasmInitExpr.RefFunc(0)], kWasmFuncRef);
builder.addActiveElementSegment(3, WasmInitExpr.I32Const(0), [WasmInitExpr.RefFunc(0), WasmInitExpr.RefFunc(1), WasmInitExpr.RefFunc(2), WasmInitExpr.RefFunc(3), WasmInitExpr.RefFunc(0), WasmInitExpr.RefFunc(1), WasmInitExpr.RefFunc(2), WasmInitExpr.RefFunc(3), WasmInitExpr.RefFunc(0), WasmInitExpr.RefFunc(1), WasmInitExpr.RefFunc(2), WasmInitExpr.RefFunc(3), WasmInitExpr.RefFunc(0), WasmInitExpr.RefFunc(1), WasmInitExpr.RefFunc(2), WasmInitExpr.RefFunc(3)], kWasmFuncRef);
builder.addTag(makeSig([], []));
// Generate function 1 (out of 4).
builder.addFunction(undefined, 1 /* sig */)
.addLocals(kWasmI64, 1).addLocals(wasmOptRefType(4), 1).addLocals(kWasmI32, 2).addLocals(kWasmI64, 1).addLocals(wasmOptRefType(4), 1).addLocals(kWasmI32, 1).addLocals(kWasmI64, 3).addLocals(kWasmI32, 1).addLocals(kWasmI64, 1).addLocals(kWasmI32, 1).addLocals(kWasmI64, 1).addLocals(wasmOptRefType(4), 1).addLocals(kWasmI64, 1)
.addBodyWithEnd([
// signature: i_iii
// body:
kExprRefFunc, 0x01, // ref.func
kExprBlock, 0x06, // block @32 i32 i32 (ref 1) (ref any) i32 (ref 1) i64 (ref null 4) i32 (ref any) (ref null 4) i64 i64 (ref eq) i32
kExprDrop, // drop
kExprI32Const, 0xf1, 0x00, // i32.const
kExprI64Const, 0x00, // i64.const
kExprI64Const, 0xe1, 0x00, // i64.const
kExprI64Const, 0x00, // i64.const
kExprI64Const, 0xef, 0x00, // i64.const
kExprI32Const, 0x00, // i32.const
kSimdPrefix, kExprI8x16Splat, // i8x16.splat
kExprI32Const, 0xf0, 0x02, // i32.const
kSimdPrefix, kExprI64x2ShrU, 0x01, // i64x2.shr_u
kSimdPrefix, kExprI32x4BitMask, 0x01, // i32x4.bitmask
kExprI32Const, 0x00, // i32.const
kExprRefFunc, 0x00, // ref.func
kGCPrefix, kExprRttCanon, 0x00, // rtt.canon
kGCPrefix, kExprStructNewWithRtt, 0x00, // struct.new_with_rtt
kExprI32Const, 0x00, // i32.const
kExprRefFunc, 0x00, // ref.func
kExprI64Const, 0x00, // i64.const
kExprRefNull, 0x04, // ref.null
kExprI32Const, 0x00, // i32.const
kGCPrefix, kExprRttCanon, 0x00, // rtt.canon
kGCPrefix, kExprStructNewWithRtt, 0x00, // struct.new_with_rtt
kExprRefNull, 0x04, // ref.null
kExprI64Const, 0x00, // i64.const
kExprI64Const, 0x00, // i64.const
kGCPrefix, kExprRttCanon, 0x00, // rtt.canon
kGCPrefix, kExprStructNewWithRtt, 0x00, // struct.new_with_rtt
kExprI32Const, 0x00, // i32.const
kExprRefNull, 0x6e, // ref.null
kExprBrOnNull, 0x00, // br_on_null
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprI64ShrU, // i64.shr_u
kExprI64Ror, // i64.ror
kExprI64ShrS, // i64.shr_s
kExprI64Const, 0x01, // i64.const
kSimdPrefix, kExprS128Const, 0xff, 0x01, 0x0d, 0x00, 0x70, 0x70, 0x71, 0x3a, 0x00, 0x00, 0x00, 0x73, 0x01, 0x6f, 0x70, 0x71, // s128.const
kSimdPrefix, kExprI64x2ExtractLane, 0x01, // i64x2.extract_lane
kExprI64ShrS, // i64.shr_s
kExprI64Ror, // i64.ror
kAtomicPrefix, kExprI64AtomicStore16U, 0x01, 0xef, 0xc2, 0xbd, 0x8b, 0x06, // i64.atomic.store16_u
kSimdPrefix, kExprS128Const, 0x71, 0x6f, 0x61, 0x61, 0x6f, 0x70, 0x00, 0x01, 0x70, 0x00, 0x71, 0x70, 0x3a, 0x70, 0x00, 0x00, // s128.const
kSimdPrefix, kExprI32x4BitMask, 0x01, // i32x4.bitmask
kExprRefNull, 0x03, // ref.null
kExprRefNull, 0x70, // ref.null
kExprRefNull, 0x6f, // ref.null
kExprI32Const, 0x01, // i32.const
kExprCallIndirect, 0x02, 0x00, // call_indirect sig #2: r_nnn
kExprDrop, // drop
kExprI32Const, 0x00, // i32.const
kExprI32Const, 0x00, // i32.const
kExprI32Const, 0x00, // i32.const
kExprCallIndirect, 0x01, 0x00, // call_indirect sig #1: i_iii
kExprNop, // nop
kExprI64Const, 0xe1, 0x00, // i64.const
kExprI32Const, 0x00, // i32.const
kAtomicPrefix, kExprI64AtomicLoad, 0x02, 0xe0, 0x8c, 0xbc, 0x03, // i64.atomic.load64
kExprI64ShrU, // i64.shr_u
kAtomicPrefix, kExprI64AtomicStore8U, 0x00, 0x80, 0x82, 0x7c, // i64.atomic.store8_u
kExprBlock, 0x40, // block @219
kExprEnd, // end @221
kExprBlock, 0x7f, // block @222 i32
kExprI32Const, 0x00, // i32.const
kExprEnd, // end @226
kExprI32Const, 0xe3, 0x00, // i32.const
kSimdPrefix, kExprI8x16Splat, // i8x16.splat
kExprI32Const, 0xe3, 0x00, // i32.const
kSimdPrefix, kExprI8x16Splat, // i8x16.splat
kSimdPrefix, kExprI32x4BitMask, 0x01, // i32x4.bitmask
kSimdPrefix, kExprI64x2ShrS, 0x01, // i64x2.shr_s
kSimdPrefix, kExprI32x4BitMask, 0x01, // i32x4.bitmask
kExprRefFunc, 0x00, // ref.func
kGCPrefix, kExprRttCanon, 0x00, // rtt.canon
kGCPrefix, kExprStructNewWithRtt, 0x00, // struct.new_with_rtt
kExprI32Const, 0x00, // i32.const
kGCPrefix, kExprRttCanon, 0x00, // rtt.canon
kGCPrefix, kExprStructNewWithRtt, 0x00, // struct.new_with_rtt
kExprRefNull, 0x6e, // ref.null
kExprI32Const, 0x00, // i32.const
kExprI32Const, 0x00, // i32.const
kExprBlock, 0x07, // block @268 (ref 1) i64 (ref null 4) i32 (ref any) (ref null 4) i64 i64 (ref eq) i32
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprRefFunc, 0x00, // ref.func
kExprI64Const, 0x00, // i64.const
kExprRefNull, 0x04, // ref.null
kExprI32Const, 0x00, // i32.const
kGCPrefix, kExprRttCanon, 0x00, // rtt.canon
kGCPrefix, kExprStructNewWithRtt, 0x00, // struct.new_with_rtt
kExprRefNull, 0x04, // ref.null
kExprI64Const, 0x00, // i64.const
kExprI64Const, 0x00, // i64.const
kGCPrefix, kExprRttCanon, 0x00, // rtt.canon
kGCPrefix, kExprStructNewWithRtt, 0x00, // struct.new_with_rtt
kExprI32Const, 0x00, // i32.const
kExprEnd, // end @302
kExprEnd, // end @303
kExprBlock, 0x08, // block @304 i32
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprDrop, // drop
kExprNop, // nop
kExprEnd, // end @321
kExprEnd, // end @322
]);
// Generate function 2 (out of 4).
builder.addFunction(undefined, 2 /* sig */)
.addBodyWithEnd([
// signature: r_nnn
// body:
kGCPrefix, kExprRttCanon, 0x00, // rtt.canon
kGCPrefix, kExprStructNewWithRtt, 0x00, // struct.new_with_rtt
kExprEnd, // end @7
]);
// Generate function 3 (out of 4).
builder.addFunction(undefined, 3 /* sig */)
.addBodyWithEnd([
// signature: rnlnl_lfsi
// body:
kExprRefFunc, 0x00, // ref.func
kExprRefNull, 0x02, // ref.null
kExprI64Const, 0x00, // i64.const
kExprRefNull, 0x02, // ref.null
kExprI64Const, 0x00, // i64.const
kExprEnd, // end @11
]);
// Generate function 4 (out of 4).
builder.addFunction(undefined, 4 /* sig */)
.addBodyWithEnd([
// signature: nndniniiniilisn_v
// body:
kExprRefNull, 0x02, // ref.null
kExprRefNull, 0x02, // ref.null
kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // f64.const
kExprRefNull, 0x02, // ref.null
kExprI32Const, 0x00, // i32.const
kExprRefNull, 0x02, // ref.null
kExprI32Const, 0x00, // i32.const
kExprI32Const, 0x00, // i32.const
kExprRefNull, 0x02, // ref.null
kExprI32Const, 0x00, // i32.const
kExprI32Const, 0x00, // i32.const
kExprI64Const, 0x00, // i64.const
kExprI32Const, 0x00, // i32.const
kExprI32Const, 0x00, // i32.const
kSimdPrefix, kExprI8x16Splat, // i8x16.splat
kExprRefNull, 0x02, // ref.null
kExprEnd, // end @40
]);
builder.addExport('main', 0);
const instance = builder.instantiate();
assertEquals(0, instance.exports.main(1, 2, 3));