diff --git a/src/assembler.cc b/src/assembler.cc
index e9542013f5..4205b94aae 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -213,12 +213,6 @@ bool RelocInfo::OffHeapTargetIsCodedSpecially() {
 #endif
 }
 
-void RelocInfo::set_wasm_context_reference(Address address,
-                                           ICacheFlushMode icache_flush_mode) {
-  DCHECK(IsWasmContextReference(rmode_));
-  set_embedded_address(address, icache_flush_mode);
-}
-
 void RelocInfo::set_global_handle(Address address,
                                   ICacheFlushMode icache_flush_mode) {
   DCHECK_EQ(rmode_, WASM_GLOBAL_HANDLE);
@@ -242,11 +236,6 @@ Address RelocInfo::global_handle() const {
   return embedded_address();
 }
 
-Address RelocInfo::wasm_context_reference() const {
-  DCHECK(IsWasmContextReference(rmode_));
-  return embedded_address();
-}
-
 void RelocInfo::set_target_address(Address target,
                                    WriteBarrierMode write_barrier_mode,
                                    ICacheFlushMode icache_flush_mode) {
@@ -546,8 +535,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
       return "constant pool";
     case VENEER_POOL:
      return "veneer pool";
-    case WASM_CONTEXT_REFERENCE:
-      return "wasm context reference";
    case WASM_GLOBAL_HANDLE:
      return "global handle";
    case WASM_CALL:
@@ -650,7 +637,6 @@ void RelocInfo::Verify(Isolate* isolate) {
    case DEOPT_ID:
    case CONST_POOL:
    case VENEER_POOL:
-    case WASM_CONTEXT_REFERENCE:
    case WASM_GLOBAL_HANDLE:
    case WASM_CALL:
    case JS_TO_WASM_CALL:
diff --git a/src/assembler.h b/src/assembler.h
index d26894ed6b..6a5b0b17f1 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -366,10 +366,6 @@ class RelocInfo {
    // Please note the order is important (see IsCodeTarget, IsGCRelocMode).
    CODE_TARGET,
    EMBEDDED_OBJECT,
-    // Wasm entries are to relocate pointers into the wasm memory embedded in
-    // wasm code. Everything after WASM_CONTEXT_REFERENCE (inclusive) is not
-    // GC'ed.
-    WASM_CONTEXT_REFERENCE,
    WASM_GLOBAL_HANDLE,
    WASM_CALL,
    JS_TO_WASM_CALL,
@@ -466,15 +462,12 @@ class RelocInfo {
    return mode == OFF_HEAP_TARGET;
  }
  static inline bool IsNone(Mode mode) { return mode == NONE; }
-  static inline bool IsWasmContextReference(Mode mode) {
-    return mode == WASM_CONTEXT_REFERENCE;
-  }
  static inline bool IsWasmReference(Mode mode) {
    return IsWasmPtrReference(mode);
  }
  static inline bool IsWasmPtrReference(Mode mode) {
-    return mode == WASM_CONTEXT_REFERENCE || mode == WASM_GLOBAL_HANDLE ||
-           mode == WASM_CALL || mode == JS_TO_WASM_CALL;
+    return mode == WASM_GLOBAL_HANDLE || mode == WASM_CALL ||
+           mode == JS_TO_WASM_CALL;
  }
 
  static constexpr int ModeMask(Mode mode) { return 1 << mode; }
@@ -509,14 +502,10 @@ class RelocInfo {
  // constant pool, otherwise the pointer is embedded in the instruction stream.
  bool IsInConstantPool();
 
-  Address wasm_context_reference() const;
  Address global_handle() const;
  Address js_to_wasm_address() const;
  Address wasm_call_address() const;
 
-  void set_wasm_context_reference(
-      Address address,
-      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
  void set_target_address(
      Address target,
      WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
diff --git a/src/compiler/int64-lowering.cc b/src/compiler/int64-lowering.cc
index 6d8dc489c4..8248715876 100644
--- a/src/compiler/int64-lowering.cc
+++ b/src/compiler/int64-lowering.cc
@@ -281,15 +281,15 @@ void Int64Lowering::LowerNode(Node* node) {
          static_cast<int>(signature()->parameter_count())) {
        int old_index = ParameterIndexOf(node->op());
        // TODO(wasm): Make this part not wasm specific.
-        // Prevent special lowering of the WasmContext parameter.
-        if (old_index == kWasmContextParameterIndex) {
+        // Prevent special lowering of the instance parameter.
+        if (old_index == kWasmInstanceParameterIndex) {
          DefaultLowering(node);
          break;
        }
        // Adjust old_index to be compliant with the signature.
        --old_index;
        int new_index = GetParameterIndexAfterLowering(signature(), old_index);
-        // Adjust new_index to consider the WasmContext parameter.
+        // Adjust new_index to consider the instance parameter.
        ++new_index;
        NodeProperties::ChangeOp(node, common()->Parameter(new_index));
diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc
index 3ee91f356d..48e63d127f 100644
--- a/src/compiler/wasm-compiler.cc
+++ b/src/compiler/wasm-compiler.cc
@@ -58,6 +58,21 @@ namespace compiler {
  FATAL("Unsupported opcode 0x%x:%s", (opcode), \
        wasm::WasmOpcodes::OpcodeName(opcode));
 
+#define WASM_INSTANCE_OBJECT_OFFSET(name) \
+  (WasmInstanceObject::k##name##Offset - kHeapObjectTag)
+
+#define LOAD_INSTANCE_FIELD(name, type)                                      \
+  graph()->NewNode(                                                          \
+      jsgraph()->machine()->Load(type), instance_node_.get(),                \
+      jsgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(name)), *effect_, \
+      *control_)
+
+#define LOAD_FIXED_ARRAY_SLOT(array_node, index)                              \
+  graph()->NewNode(jsgraph()->machine()->Load(MachineType::TaggedPointer()),  \
+                   array_node,                                                \
+                   jsgraph()->Int32Constant(FixedArrayOffsetMinusTag(index)), \
+                   *effect_, *control_);
+
 namespace {
 
 constexpr uint32_t kBytesPerExceptionValuesArrayElement = 2;
@@ -89,7 +104,6 @@ WasmGraphBuilder::WasmGraphBuilder(
      jsgraph_(jsgraph),
      centry_stub_node_(jsgraph_->HeapConstant(centry_stub)),
      env_(env),
-      function_tables_(zone),
      cur_buffer_(def_buffer_),
      cur_bufsize_(kDefaultBufferSize),
      has_simd_(ContainsSimd(sig)),
@@ -2532,22 +2546,22 @@ Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node* function,
 Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
                                      Node*** rets,
                                      wasm::WasmCodePosition position,
-                                      Node* wasm_context, bool use_retpoline) {
-  if (wasm_context == nullptr) {
-    DCHECK_NOT_NULL(wasm_context_);
-    wasm_context = wasm_context_.get();
+                                      Node* instance_node, bool use_retpoline) {
+  if (instance_node == nullptr) {
+    DCHECK_NOT_NULL(instance_node_);
+    instance_node = instance_node_.get();
  }
  SetNeedsStackCheck();
  const size_t params = sig->parameter_count();
-  const size_t extra = 3;  // wasm_context, effect, and control.
+  const size_t extra = 3;  // instance_node, effect, and control.
  const size_t count = 1 + params + extra;
 
  // Reallocate the buffer to make space for extra inputs.
  args = Realloc(args, 1 + params, count);
 
-  // Make room for the wasm_context parameter at index 1, just after code.
+  // Make room for the instance_node parameter at index 1, just after code.
  memmove(&args[2], &args[1], params * sizeof(Node*));
-  args[1] = wasm_context;
+  args[1] = instance_node;
 
  // Add effect and control inputs.
  args[params + 2] = *effect_;
@@ -2582,12 +2596,32 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets,
  DCHECK_NULL(args[0]);
  wasm::FunctionSig* sig = env_->module->functions[index].sig;
 
-  // Just encode the function index. This will be patched at instantiation.
-  Address code = reinterpret_cast<Address>(index);
-  args[0] = jsgraph()->RelocatableIntPtrConstant(
-      reinterpret_cast<intptr_t>(code), RelocInfo::WASM_CALL);
+  if (env_ && index < env_->module->num_imported_functions) {
+    // A call to an imported function.
+    // Load the instance from the imported_instances array at a known offset.
+    Node* imported_instances = LOAD_INSTANCE_FIELD(
+        ImportedFunctionInstances, MachineType::TaggedPointer());
+    Node* instance_node = LOAD_FIXED_ARRAY_SLOT(imported_instances, index);
 
-  return BuildWasmCall(sig, args, rets, position);
+    // Load the target from the imported_targets array at a known offset.
+    Node* imported_targets =
+        LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::Pointer());
+    Node* target_node = graph()->NewNode(
+        jsgraph()->machine()->Load(MachineType::Pointer()), imported_targets,
+        jsgraph()->Int32Constant(index * sizeof(Address)),
+        jsgraph()->graph()->start(), jsgraph()->graph()->start());
+    args[0] = target_node;
+    return BuildWasmCall(sig, args, rets, position, instance_node);
+
+  } else {
+    // A call to a function in this module.
+    // Just encode the function index. This will be patched at instantiation.
+    Address code = reinterpret_cast<Address>(index);
+    args[0] = jsgraph()->RelocatableIntPtrConstant(
+        reinterpret_cast<intptr_t>(code), RelocInfo::WASM_CALL);
+
+    return BuildWasmCall(sig, args, rets, position);
+  }
 }
 
 Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
@@ -2597,18 +2631,16 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
  DCHECK_NOT_NULL(env_);
 
  // Assume only one table for now.
-  uint32_t table_index = 0;
  wasm::FunctionSig* sig = env_->module->signatures[sig_index];
 
-  Node* table = nullptr;
-  Node* table_size = nullptr;
-  GetFunctionTableNodes(table_index, &table, &table_size);
+  Node* ift_size =
+      LOAD_INSTANCE_FIELD(IndirectFunctionTableSize, MachineType::Uint32());
+
  MachineOperatorBuilder* machine = jsgraph()->machine();
  Node* key = args[0];
 
  // Bounds check against the table size.
-  Node* in_bounds =
-      graph()->NewNode(machine->Uint32LessThan(), key, table_size);
+  Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, ift_size);
  TrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
 
  // Mask the key to prevent SSCA.
@@ -2618,55 +2650,55 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
        graph()->NewNode(machine->Word32Xor(), key, Int32Constant(-1));
    Node* masked_diff = graph()->NewNode(
        machine->Word32And(),
-        graph()->NewNode(machine->Int32Sub(), key, table_size), neg_key);
+        graph()->NewNode(machine->Int32Sub(), key, ift_size), neg_key);
    Node* mask =
        graph()->NewNode(machine->Word32Sar(), masked_diff, Int32Constant(31));
    key = graph()->NewNode(machine->Word32And(), key, mask);
  }
 
  // Load signature from the table and check.
-  // The table is a FixedArray; signatures are encoded as SMIs.
-  // [sig1, code1, sig2, code2, sig3, code3, ...]
-  static_assert(compiler::kFunctionTableEntrySize == 2, "consistency");
-  static_assert(compiler::kFunctionTableSignatureOffset == 0, "consistency");
-  static_assert(compiler::kFunctionTableCodeOffset == 1, "consistency");
+  Node* ift_sig_ids =
+      LOAD_INSTANCE_FIELD(IndirectFunctionTableSigIds, MachineType::Pointer());
 
-  int32_t canonical_sig_num = env_->module->signature_ids[sig_index];
-
-  // The table entries are {IndirectFunctionTableEntry} structs.
+  int32_t expected_sig_id = env_->module->signature_ids[sig_index];
 
  Node* scaled_key =
-      graph()->NewNode(machine->Int32Mul(), key,
-                       Int32Constant(sizeof(IndirectFunctionTableEntry)));
+      graph()->NewNode(machine->Word32Shl(), key, Int32Constant(2));
  const Operator* add = nullptr;
  if (machine->Is64()) {
-    scaled_key = graph()->NewNode(machine->ChangeInt32ToInt64(), scaled_key);
+    scaled_key = graph()->NewNode(machine->ChangeUint32ToUint64(), scaled_key);
    add = machine->Int64Add();
  } else {
    add = machine->Int32Add();
  }
-  Node* entry_address = graph()->NewNode(add, table, scaled_key);
-  Node* loaded_sig = graph()->NewNode(
-      machine->Load(MachineType::Int32()), entry_address,
-      Int32Constant(offsetof(IndirectFunctionTableEntry, sig_id)), *effect_,
-      *control_);
+
+  Node* loaded_sig =
+      graph()->NewNode(machine->Load(MachineType::Int32()), ift_sig_ids,
+                       scaled_key, *effect_, *control_);
  Node* sig_match = graph()->NewNode(machine->WordEqual(), loaded_sig,
-                                     Int32Constant(canonical_sig_num));
+                                     Int32Constant(expected_sig_id));
  TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
 
-  Node* target = graph()->NewNode(
-      machine->Load(MachineType::Pointer()), entry_address,
-      Int32Constant(offsetof(IndirectFunctionTableEntry, target)), *effect_,
-      *control_);
+  Node* ift_targets =
+      LOAD_INSTANCE_FIELD(IndirectFunctionTableTargets, MachineType::Pointer());
+  Node* ift_instances = LOAD_INSTANCE_FIELD(IndirectFunctionTableInstances,
+                                            MachineType::TaggedPointer());
 
-  Node* loaded_context = graph()->NewNode(
-      machine->Load(MachineType::Pointer()), entry_address,
-      Int32Constant(offsetof(IndirectFunctionTableEntry, context)), *effect_,
-      *control_);
+  scaled_key = graph()->NewNode(machine->Word32Shl(), key,
+                                Int32Constant(kPointerSizeLog2));
+
+  Node* target = graph()->NewNode(machine->Load(MachineType::Pointer()),
+                                  ift_targets, scaled_key, *effect_, *control_);
+
+  auto access = AccessBuilder::ForFixedArrayElement();
+  Node* target_instance = graph()->NewNode(
+      machine->Load(MachineType::TaggedPointer()),
+      graph()->NewNode(add, ift_instances, scaled_key),
+      Int32Constant(access.header_size - access.tag()), *effect_, *control_);
 
  args[0] = target;
-  return BuildWasmCall(sig, args, rets, position, loaded_context);
+  return BuildWasmCall(sig, args, rets, position, target_instance);
 }
 
 Node* WasmGraphBuilder::BuildI32Rol(Node* left, Node* right) {
@@ -2996,11 +3028,11 @@ Node* WasmGraphBuilder::BuildHeapNumberValueIndexConstant() {
  return jsgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
 }
 
-void WasmGraphBuilder::BuildJSToWasmWrapper(wasm::WasmCode* wasm_code,
-                                            Address wasm_context_address) {
+void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<WeakCell> weak_instance,
+                                            wasm::WasmCode* wasm_code) {
  const int wasm_count = static_cast<int>(sig_->parameter_count());
  const int count =
-      wasm_count + 4;  // wasm_code, wasm_context, effect, and control.
+      wasm_count + 4;  // wasm_code, instance_node, effect, and control.
  Node** args = Buffer(count);
 
  // Build the start and the JS parameter nodes.
@@ -3014,13 +3046,19 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(wasm::WasmCode* wasm_code,
          Linkage::GetJSCallContextParamIndex(wasm_count + 1), "%context"),
      graph()->start());
 
-  // Create the wasm_context node to pass as parameter. This must be a
-  // RelocatableIntPtrConstant because JSToWasm wrappers are compiled at module
-  // compile time and patched at instance build time.
-  DCHECK_NULL(wasm_context_);
-  wasm_context_ = jsgraph()->RelocatableIntPtrConstant(
-      reinterpret_cast<uintptr_t>(wasm_context_address),
-      RelocInfo::WASM_CONTEXT_REFERENCE);
+  // Create the instance_node node to pass as parameter. This is either
+  // an actual reference to an instance or a placeholder reference,
+  // since JSToWasm wrappers can be compiled at module compile time and
+  // patched at instance build time.
+  DCHECK_NULL(instance_node_);
+  // TODO(titzer): JSToWasmWrappers should load the instance from the
+  // incoming JSFunction, but this is currently too slow/too complex because
+  // we use a regular JS property with a private symbol.
+  instance_node_ = graph()->NewNode(
+      jsgraph()->machine()->Load(MachineType::TaggedPointer()),
+      jsgraph()->HeapConstant(weak_instance),
+      jsgraph()->Int32Constant(WeakCell::kValueOffset - kHeapObjectTag),
+      *effect_, *control_);
 
  Address instr_start =
      wasm_code == nullptr ? nullptr : wasm_code->instructions().start();
@@ -3038,7 +3076,7 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(wasm::WasmCode* wasm_code,
    // the wasm function could not be re-imported into another wasm module.
    int pos = 0;
    args[pos++] = wasm_code_node;
-    args[pos++] = wasm_context_.get();
+    args[pos++] = instance_node_.get();
    args[pos++] = *effect_;
    args[pos++] = *control_;
 
@@ -3054,7 +3092,7 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(wasm::WasmCode* wasm_code,
 
  int pos = 0;
  args[pos++] = wasm_code_node;
-  args[pos++] = wasm_context_.get();
+  args[pos++] = instance_node_.get();
 
  // Convert JS parameters to wasm numbers.
  for (int i = 0; i < wasm_count; ++i) {
@@ -3089,42 +3127,15 @@ int WasmGraphBuilder::AddParameterNodes(Node** args, int pos, int param_count,
                                        wasm::FunctionSig* sig) {
  // Convert wasm numbers to JS values.
  for (int i = 0; i < param_count; ++i) {
-    Node* param = Param(i + 1);  // Start from index 1 to drop the wasm_context.
+    Node* param =
+        Param(i + 1);  // Start from index 1 to drop the instance_node.
    args[pos++] = ToJS(param, sig->GetParam(i));
  }
  return pos;
 }
 
-Node* WasmGraphBuilder::LoadImportDataAtOffset(int offset, Node* table) {
-  offset = FixedArray::OffsetOfElementAt(offset) - kHeapObjectTag;
-  Node* offset_node = jsgraph()->Int32Constant(offset);
-  Node* import_data = graph()->NewNode(
-      jsgraph()->machine()->Load(LoadRepresentation::TaggedPointer()), table,
-      offset_node, *effect_, *control_);
-  *effect_ = import_data;
-  return import_data;
-}
-
-Node* WasmGraphBuilder::LoadNativeContext(Node* table) {
-  // The js_imports_table is set up so that index 0 has isolate->native_context
-  return LoadImportDataAtOffset(0, table);
-}
-
-int OffsetForImportData(int index, WasmGraphBuilder::ImportDataType type) {
-  // The js_imports_table is set up so that index 0 has isolate->native_context
-  // and for every index, 3*index+1 has the JSReceiver, 3*index+2 has function's
-  // global proxy and 3*index+3 has function's context.
-  return 3 * index + type;
-}
-
-Node* WasmGraphBuilder::LoadImportData(int index, ImportDataType type,
-                                       Node* table) {
-  return LoadImportDataAtOffset(OffsetForImportData(index, type), table);
-}
-
-bool WasmGraphBuilder::BuildWasmToJSWrapper(
-    Handle<JSReceiver> target, Handle<FixedArray> global_js_imports_table,
-    int index) {
+bool WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
+                                            int index) {
  DCHECK(target->IsCallable());
 
  int wasm_count = static_cast<int>(sig_->parameter_count());
@@ -3136,22 +3147,30 @@ bool WasmGraphBuilder::BuildWasmToJSWrapper(
  *effect_ = start;
  *control_ = start;
 
-  // We add the target function to a table and look it up during runtime. This
-  // ensures that if the GC kicks in, it doesn't need to patch the code for the
-  // JS function.
-  // js_imports_table is fixed array with global handle scope whose lifetime is
-  // tied to the instance.
-  // TODO(aseemgarg): explore using per-import global handle instead of a table
-  Node* table_ptr = jsgraph()->IntPtrConstant(
-      reinterpret_cast<intptr_t>(global_js_imports_table.location()));
-  Node* table = graph()->NewNode(
-      jsgraph()->machine()->Load(LoadRepresentation::TaggedPointer()),
-      table_ptr, jsgraph()->IntPtrConstant(0), *effect_, *control_);
-  *effect_ = table;
+  instance_node_.set(Param(compiler::kWasmInstanceParameterIndex));
+  Node* callables_node = LOAD_INSTANCE_FIELD(ImportedFunctionCallables,
+                                             MachineType::TaggedPointer());
+  Node* callable_node = LOAD_FIXED_ARRAY_SLOT(callables_node, index);
+  Node* undefined_node =
+      jsgraph()->Constant(handle(isolate->heap()->undefined_value(), isolate));
+
+  Node* compiled_module =
+      LOAD_INSTANCE_FIELD(CompiledModule, MachineType::TaggedPointer());
+  // TODO(wasm): native context is only weak because of recycling compiled
+  // modules.
+  Node* weak_native_context = graph()->NewNode(
+      jsgraph()->machine()->Load(MachineType::TaggedPointer()), compiled_module,
+      jsgraph()->Int32Constant(WasmCompiledModule::kNativeContextOffset -
+                               kHeapObjectTag),
+      *effect_, *control_);
+  Node* native_context = graph()->NewNode(
+      jsgraph()->machine()->Load(MachineType::TaggedPointer()),
+      weak_native_context,
+      jsgraph()->Int32Constant(WeakCell::kValueOffset - kHeapObjectTag),
+      *effect_, *control_);
 
  if (!wasm::IsJSCompatibleSignature(sig_)) {
    // Throw a TypeError.
-    Node* native_context = LoadNativeContext(table);
    BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, native_context,
                                  nullptr, 0);
    // We don't need to return a value here, as the runtime call will not return
@@ -3161,25 +3180,30 @@
  }
 
  Node** args = Buffer(wasm_count + 9);
-  Node* call = nullptr;
 
  BuildModifyThreadInWasmFlag(false);
 
  if (target->IsJSFunction()) {
    Handle<JSFunction> function = Handle<JSFunction>::cast(target);
+    FieldAccess field_access = AccessBuilder::ForJSFunctionContext();
+    Node* function_context = graph()->NewNode(
+        jsgraph()->machine()->Load(MachineType::TaggedPointer()), callable_node,
+        jsgraph()->Int32Constant(field_access.offset - field_access.tag()),
+        *effect_, *control_);
+
    if (!IsClassConstructor(function->shared()->kind())) {
      if (function->shared()->internal_formal_parameter_count() == wasm_count) {
        int pos = 0;
-        args[pos++] =
-            LoadImportData(index, kFunction, table);  // target callable.
+        args[pos++] = callable_node;  // target callable.
        // Receiver.
        if (is_sloppy(function->shared()->language_mode()) &&
            !function->shared()->native()) {
-          args[pos++] = LoadImportData(index, kGlobalProxy, table);
+          Node* global_proxy = LOAD_FIXED_ARRAY_SLOT(
+              native_context, Context::GLOBAL_PROXY_INDEX);
+          args[pos++] = global_proxy;
        } else {
-          args[pos++] = jsgraph()->Constant(
-              handle(isolate->heap()->undefined_value(), isolate));
+          args[pos++] = undefined_node;
        }
 
        call_descriptor = Linkage::GetJSCallDescriptor(
@@ -3188,9 +3212,9 @@ bool WasmGraphBuilder::BuildWasmToJSWrapper(
 
        // Convert wasm numbers to JS values.
        pos = AddParameterNodes(args, pos, wasm_count, sig_);
 
-        args[pos++] = jsgraph()->UndefinedConstant();  // new target
+        args[pos++] = undefined_node;  // new target
        args[pos++] = jsgraph()->Int32Constant(wasm_count);  // argument count
-        args[pos++] = LoadImportData(index, kFunctionContext, table);
+        args[pos++] = function_context;
        args[pos++] = *effect_;
        args[pos++] = *control_;
@@ -3200,24 +3224,24 @@ bool WasmGraphBuilder::BuildWasmToJSWrapper(
      Callable callable = CodeFactory::ArgumentAdaptor(isolate);
      int pos = 0;
      args[pos++] = jsgraph()->HeapConstant(callable.code());
-      args[pos++] =
-          LoadImportData(index, kFunction, table);   // target callable
-      args[pos++] = jsgraph()->UndefinedConstant();  // new target
+      args[pos++] = callable_node;   // target callable
+      args[pos++] = undefined_node;  // new target
      args[pos++] = jsgraph()->Int32Constant(wasm_count);  // argument count
      args[pos++] = jsgraph()->Int32Constant(
          function->shared()->internal_formal_parameter_count());
      // Receiver.
      if (is_sloppy(function->shared()->language_mode()) &&
          !function->shared()->native()) {
-        args[pos++] = LoadImportData(index, kGlobalProxy, table);
+        Node* global_proxy = LOAD_FIXED_ARRAY_SLOT(
+            native_context, Context::GLOBAL_PROXY_INDEX);
+        args[pos++] = global_proxy;
      } else {
-        args[pos++] = jsgraph()->Constant(
-            handle(isolate->heap()->undefined_value(), isolate));
+        args[pos++] = undefined_node;
      }
 
      // Convert wasm numbers to JS values.
      pos = AddParameterNodes(args, pos, wasm_count, sig_);
-      args[pos++] = LoadImportData(index, kFunctionContext, table);
+      args[pos++] = function_context;
      args[pos++] = *effect_;
      args[pos++] = *control_;
      call = graph()->NewNode(
@@ -3229,16 +3253,15 @@ bool WasmGraphBuilder::BuildWasmToJSWrapper(
    }
  }
 
-  Node* native_context = nullptr;
+  // We cannot call the target directly, we have to use the Call builtin.
  if (!call) {
    int pos = 0;
    // We cannot call the target directly, we have to use the Call builtin.
    Callable callable = CodeFactory::Call(isolate);
    args[pos++] = jsgraph()->HeapConstant(callable.code());
-    args[pos++] = LoadImportData(index, kFunction, table);  // target callable.
-    args[pos++] = jsgraph()->Int32Constant(wasm_count);     // argument count
-    args[pos++] = jsgraph()->Constant(
-        handle(isolate->heap()->undefined_value(), isolate));  // receiver
+    args[pos++] = callable_node;
+    args[pos++] = jsgraph()->Int32Constant(wasm_count);  // argument count
+    args[pos++] = undefined_node;                        // receiver
 
    call_descriptor = Linkage::GetStubCallDescriptor(
        isolate, graph()->zone(), callable.descriptor(), wasm_count + 1,
@@ -3252,7 +3275,6 @@ bool WasmGraphBuilder::BuildWasmToJSWrapper(
  // is only needed if the target is a constructor to throw a TypeError, if
  // the target is a native function, or if the target is a callable JSObject,
  // which can only be constructed by the runtime.
-  native_context = LoadNativeContext(table);
  args[pos++] = native_context;
  args[pos++] = *effect_;
  args[pos++] = *control_;
@@ -3264,15 +3286,13 @@ bool WasmGraphBuilder::BuildWasmToJSWrapper(
  *effect_ = call;
  SetSourcePosition(call, 0);
 
-  BuildModifyThreadInWasmFlag(true);
-
  // Convert the return value back.
  Node* val = sig_->return_count() == 0
                  ? jsgraph()->Int32Constant(0)
-                  : FromJS(call,
-                           native_context != nullptr ? native_context
-                                                     : LoadNativeContext(table),
-                           sig_->GetReturn());
+                  : FromJS(call, native_context, sig_->GetReturn());
+
+  BuildModifyThreadInWasmFlag(true);
+
  Return(val);
  return true;
 }
@@ -3286,41 +3306,6 @@ bool HasInt64ParamOrReturn(wasm::FunctionSig* sig) {
 }
 }  // namespace
 
-void WasmGraphBuilder::BuildWasmToWasmWrapper(wasm::WasmCode* wasm_code,
-                                              Address new_context_address) {
-  int wasm_count = static_cast<int>(sig_->parameter_count());
-  int count = wasm_count + 4;  // wasm_code, wasm_context, effect, and control.
-  Node** args = Buffer(count);
-
-  // Build the start node.
-  Node* start = Start(count + 1);
-  *control_ = start;
-  *effect_ = start;
-
-  int pos = 0;
-  // Add the wasm code target.
-  Address instr_start =
-      wasm_code == nullptr ? nullptr : wasm_code->instructions().start();
-  args[pos++] = jsgraph()->RelocatableIntPtrConstant(
-      reinterpret_cast<intptr_t>(instr_start), RelocInfo::JS_TO_WASM_CALL);
-  // Add the wasm_context of the other instance.
-  args[pos++] = jsgraph()->IntPtrConstant(
-      reinterpret_cast<uintptr_t>(new_context_address));
-  // Add the parameters starting from index 1 since the parameter with index 0
-  // is the old wasm_context.
-  for (int i = 0; i < wasm_count; ++i) {
-    args[pos++] = Param(i + 1);
-  }
-  args[pos++] = *effect_;
-  args[pos++] = *control_;
-
-  // Tail-call the wasm code.
-  auto call_descriptor = GetWasmCallDescriptor(jsgraph()->zone(), sig_);
-  Node* tail_call = graph()->NewNode(
-      jsgraph()->common()->TailCall(call_descriptor), count, args);
-  MergeControlToEnd(jsgraph(), tail_call);
-}
-
 void WasmGraphBuilder::BuildWasmInterpreterEntry(uint32_t func_index) {
  int param_count = static_cast<int>(sig_->parameter_count());
 
@@ -3354,7 +3339,7 @@
  for (int i = 0; i < param_count; ++i) {
    wasm::ValueType type = sig_->GetParam(i);
-    // Start from the parameter with index 1 to drop the wasm_context.
+    // Start from the parameter with index 1 to drop the instance_node.
    *effect_ = graph()->NewNode(GetSafeStoreOperator(offset, type), arg_buffer,
                                Int32Constant(offset), Param(i + 1), *effect_,
                                *control_);
@@ -3401,16 +3386,16 @@
      machine->Load(MachineType::Pointer()), foreign_code_obj,
      Int32Constant(Foreign::kForeignAddressOffset - kHeapObjectTag), *effect_,
      *control_);
 
-  Node* wasm_context = Param(CWasmEntryParameters::kWasmContext + 1);
+  Node* instance_node = Param(CWasmEntryParameters::kWasmInstance + 1);
  Node* arg_buffer = Param(CWasmEntryParameters::kArgumentsBuffer + 1);
 
  int wasm_arg_count = static_cast<int>(sig_->parameter_count());
-  int arg_count = wasm_arg_count + 4;  // code, wasm_context, control, effect
+  int arg_count = wasm_arg_count + 4;  // code, instance_node, control, effect
  Node** args = Buffer(arg_count);
 
  int pos = 0;
  args[pos++] = code_obj;
-  args[pos++] = wasm_context;
+  args[pos++] = instance_node;
 
  int offset = 0;
  for (wasm::ValueType type : sig_->parameters()) {
@@ -3458,48 +3443,46 @@
  }
 }
 
-void WasmGraphBuilder::InitContextCache(WasmContextCacheNodes* context_cache) {
-  DCHECK_NOT_NULL(wasm_context_);
+void WasmGraphBuilder::InitInstanceCache(
+    WasmInstanceCacheNodes* instance_cache) {
+  DCHECK_NOT_NULL(instance_node_);
  DCHECK_NOT_NULL(*control_);
  DCHECK_NOT_NULL(*effect_);
 
  // Load the memory start.
  Node* mem_start = graph()->NewNode(
-      jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_.get(),
-      jsgraph()->Int32Constant(
-          static_cast<int32_t>(offsetof(WasmContext, mem_start))),
+      jsgraph()->machine()->Load(MachineType::UintPtr()), instance_node_.get(),
+      jsgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(MemoryStart)),
      *effect_, *control_);
  *effect_ = mem_start;
-  context_cache->mem_start = mem_start;
+  instance_cache->mem_start = mem_start;
 
  // Load the memory size.
  Node* mem_size = graph()->NewNode(
-      jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_.get(),
-      jsgraph()->Int32Constant(
-          static_cast<int32_t>(offsetof(WasmContext, mem_size))),
+      jsgraph()->machine()->Load(MachineType::Uint32()), instance_node_.get(),
+      jsgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(MemorySize)),
      *effect_, *control_);
  *effect_ = mem_size;
-  context_cache->mem_size = mem_size;
+  instance_cache->mem_size = mem_size;
 
  if (untrusted_code_mitigations_) {
    // Load the memory mask.
    Node* mem_mask = graph()->NewNode(
-        jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_.get(),
-        jsgraph()->Int32Constant(
-            static_cast<int32_t>(offsetof(WasmContext, mem_mask))),
+        jsgraph()->machine()->Load(MachineType::Uint32()), instance_node_.get(),
+        jsgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(MemoryMask)),
        *effect_, *control_);
    *effect_ = mem_mask;
-    context_cache->mem_mask = mem_mask;
+    instance_cache->mem_mask = mem_mask;
  } else {
    // Explicitly set to nullptr to ensure a SEGV when we try to use it.
-    context_cache->mem_mask = nullptr;
+    instance_cache->mem_mask = nullptr;
  }
 }
 
-void WasmGraphBuilder::PrepareContextCacheForLoop(
-    WasmContextCacheNodes* context_cache, Node* control) {
+void WasmGraphBuilder::PrepareInstanceCacheForLoop(
+    WasmInstanceCacheNodes* instance_cache, Node* control) {
 #define INTRODUCE_PHI(field, rep) \
-  context_cache->field = Phi(rep, 1, &context_cache->field, control);
+  instance_cache->field = Phi(rep, 1, &instance_cache->field, control);
 
  INTRODUCE_PHI(mem_start, MachineType::PointerRepresentation());
  INTRODUCE_PHI(mem_size, MachineRepresentation::kWord32);
@@ -3510,9 +3493,9 @@ void WasmGraphBuilder::PrepareContextCacheForLoop(
 
 #undef INTRODUCE_PHI
 }
 
-void WasmGraphBuilder::NewContextCacheMerge(WasmContextCacheNodes* to,
-                                            WasmContextCacheNodes* from,
-                                            Node* merge) {
+void WasmGraphBuilder::NewInstanceCacheMerge(WasmInstanceCacheNodes* to,
+                                             WasmInstanceCacheNodes* from,
+                                             Node* merge) {
 #define INTRODUCE_PHI(field, rep)            \
  if (to->field != from->field) {            \
    Node* vals[] = {to->field, from->field}; \
@@ -3528,9 +3511,9 @@ void WasmGraphBuilder::NewContextCacheMerge(WasmContextCacheNodes* to,
 
 #undef INTRODUCE_PHI
 }
 
-void WasmGraphBuilder::MergeContextCacheInto(WasmContextCacheNodes* to,
-                                             WasmContextCacheNodes* from,
-                                             Node* merge) {
+void WasmGraphBuilder::MergeInstanceCacheInto(WasmInstanceCacheNodes* to,
+                                              WasmInstanceCacheNodes* from,
+                                              Node* merge) {
  to->mem_size = CreateOrMergeIntoPhi(MachineRepresentation::kWord32, merge,
                                      to->mem_size, from->mem_size);
  to->mem_start = CreateOrMergeIntoPhi(MachineType::PointerRepresentation(),
@@ -3574,21 +3557,21 @@ Node* WasmGraphBuilder::CreateOrMergeIntoEffectPhi(Node* merge, Node* tnode,
 
 void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
                                              uint32_t offset, Node** base_node,
                                              Node** offset_node) {
-  DCHECK_NOT_NULL(wasm_context_);
+  DCHECK_NOT_NULL(instance_node_);
  if (globals_start_ == nullptr) {
-    // Load globals_start from the WasmContext at runtime.
+    // Load globals_start from the instance object at runtime.
    // TODO(wasm): we currently generate only one load of the {globals_start}
    // start per graph, which means it can be placed anywhere by the scheduler.
    // This is legal because the globals_start should never change.
-    // However, in some cases (e.g. if the WasmContext is already in a
+    // However, in some cases (e.g. if the instance object is already in a
    // register), it is slightly more efficient to reload this value from the
-    // WasmContext. Since this depends on register allocation, it is not
+    // instance object. Since this depends on register allocation, it is not
    // possible to express in the graph, and would essentially constitute a
    // "mem2reg" optimization in TurboFan.
    globals_start_ = graph()->NewNode(
-        jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_.get(),
-        jsgraph()->Int32Constant(
-            static_cast<int32_t>(offsetof(WasmContext, globals_start))),
+        jsgraph()->machine()->Load(MachineType::UintPtr()),
+        instance_node_.get(),
+        jsgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(GlobalsStart)),
        graph()->start(), graph()->start());
  }
  *base_node = globals_start_.get();
@@ -3605,8 +3588,8 @@ void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
 }
 
 Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
-  DCHECK_NOT_NULL(context_cache_);
-  Node* mem_start = context_cache_->mem_start;
+  DCHECK_NOT_NULL(instance_cache_);
+  Node* mem_start = instance_cache_->mem_start;
  DCHECK_NOT_NULL(mem_start);
  if (offset == 0) return mem_start;
  return graph()->NewNode(jsgraph()->machine()->IntAdd(), mem_start,
@@ -3616,8 +3599,8 @@ Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
 
 Node* WasmGraphBuilder::CurrentMemoryPages() {
  // CurrentMemoryPages can not be called from asm.js.
  DCHECK_EQ(wasm::kWasmOrigin, env_->module->origin());
-  DCHECK_NOT_NULL(context_cache_);
-  Node* mem_size = context_cache_->mem_size;
+  DCHECK_NOT_NULL(instance_cache_);
+  Node* mem_size = instance_cache_->mem_size;
  DCHECK_NOT_NULL(mem_size);
  if (jsgraph()->machine()->Is64()) {
    mem_size = graph()->NewNode(jsgraph()->machine()->TruncateInt64ToInt32(),
@@ -3628,23 +3611,6 @@ Node* WasmGraphBuilder::CurrentMemoryPages() {
      jsgraph()->Int32Constant(WhichPowerOf2(wasm::kWasmPageSize)));
 }
 
-void WasmGraphBuilder::GetFunctionTableNodes(uint32_t table_index, Node** table,
-                                             Node** table_size) {
-  // The table address and size are stored in the WasmContext.
-  // Don't bother caching them, since they are only used in indirect calls,
-  // which would cause them to be spilled on the stack anyway.
-  *table = graph()->NewNode(
-      jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_.get(),
-      jsgraph()->Int32Constant(
-          static_cast<int32_t>(offsetof(WasmContext, table))),
-      *effect_, *control_);
-  *table_size = graph()->NewNode(
-      jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_.get(),
-      jsgraph()->Int32Constant(
-          static_cast<int32_t>(offsetof(WasmContext, table_size))),
-      *effect_, *control_);
-}
-
 Node* WasmGraphBuilder::BuildModifyThreadInWasmFlag(bool new_value) {
  // TODO(eholk): generate code to modify the thread-local storage directly,
  // rather than calling the runtime.
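
[Reviewer note, not part of the patch: every field read that used to be an offsetof(WasmContext, ...) against a raw struct pointer is now a machine-level load off the tagged WasmInstanceObject pointer, so the immediate must compensate for V8's heap-pointer tag. A standalone C++ sketch of that arithmetic follows; the offset value is a made-up placeholder, not the real generated constant.]

    #include <cstdint>

    constexpr intptr_t kHeapObjectTag = 1;      // V8 tags heap object pointers.
    constexpr int32_t kMemoryStartOffset = 16;  // hypothetical k##name##Offset

    // WASM_INSTANCE_OBJECT_OFFSET(name) bakes the tag correction into the
    // load's immediate: a tagged pointer is the real address + kHeapObjectTag,
    // so loading at (offset - tag) hits the field's true address.
    inline uintptr_t FieldAddress(uintptr_t tagged_instance) {
      return tagged_instance + (kMemoryStartOffset - kHeapObjectTag);
    }
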
@@ -3741,8 +3707,8 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
                                       wasm::WasmCodePosition position,
                                       EnforceBoundsCheck enforce_check) {
  if (FLAG_wasm_no_bounds_checks) return Uint32ToUintptr(index);
-  DCHECK_NOT_NULL(context_cache_);
-  Node* mem_size = context_cache_->mem_size;
+  DCHECK_NOT_NULL(instance_cache_);
+  Node* mem_size = instance_cache_->mem_size;
  DCHECK_NOT_NULL(mem_size);
 
  auto m = jsgraph()->machine();
@@ -3810,7 +3776,7 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
 
  if (untrusted_code_mitigations_) {
    // In the fallthrough case, condition the index with the memory mask.
-    Node* mem_mask = context_cache_->mem_mask;
+    Node* mem_mask = instance_cache_->mem_mask;
    DCHECK_NOT_NULL(mem_mask);
    index = graph()->NewNode(m->Word32And(), index, mem_mask);
  }
@@ -3987,9 +3953,9 @@ Node* GetAsmJsOOBValue(MachineRepresentation rep, JSGraph* jsgraph) {
 }  // namespace
 
 Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
-  DCHECK_NOT_NULL(context_cache_);
-  Node* mem_start = context_cache_->mem_start;
-  Node* mem_size = context_cache_->mem_size;
+  DCHECK_NOT_NULL(instance_cache_);
+  Node* mem_start = instance_cache_->mem_start;
+  Node* mem_size = instance_cache_->mem_size;
  DCHECK_NOT_NULL(mem_start);
  DCHECK_NOT_NULL(mem_size);
 
@@ -4007,7 +3973,7 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
 
  if (untrusted_code_mitigations_) {
    // Condition the index with the memory mask.
-    Node* mem_mask = context_cache_->mem_mask;
+    Node* mem_mask = instance_cache_->mem_mask;
    DCHECK_NOT_NULL(mem_mask);
    index =
        graph()->NewNode(jsgraph()->machine()->Word32And(), index, mem_mask);
@@ -4033,9 +3999,9 @@ Node* WasmGraphBuilder::Uint32ToUintptr(Node* node) {
 
 Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
                                           Node* val) {
-  DCHECK_NOT_NULL(context_cache_);
-  Node* mem_start = context_cache_->mem_start;
-  Node* mem_size = context_cache_->mem_size;
+  DCHECK_NOT_NULL(instance_cache_);
+  Node* mem_start = instance_cache_->mem_start;
+  Node* mem_size = instance_cache_->mem_size;
  DCHECK_NOT_NULL(mem_start);
  DCHECK_NOT_NULL(mem_size);
 
@@ -4051,7 +4017,7 @@ Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
 
  if (untrusted_code_mitigations_) {
    // Condition the index with the memory mask.
-    Node* mem_mask = context_cache_->mem_mask;
+    Node* mem_mask = instance_cache_->mem_mask;
    DCHECK_NOT_NULL(mem_mask);
    index =
        graph()->NewNode(jsgraph()->machine()->Word32And(), index, mem_mask);
@@ -4666,8 +4632,8 @@ void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
 
 }  // namespace
 
 Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
+                                    Handle<WeakCell> weak_instance,
                                    wasm::WasmCode* wasm_code, uint32_t index,
-                                    Address wasm_context_address,
                                    bool use_trap_handler) {
  const wasm::WasmFunction* func = &module->functions[index];
@@ -4693,7 +4659,7 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
                           CEntryStub(isolate, 1).GetCode(), func->sig);
  builder.set_control_ptr(&control);
  builder.set_effect_ptr(&effect);
-  builder.BuildJSToWasmWrapper(wasm_code, wasm_context_address);
+  builder.BuildJSToWasmWrapper(weak_instance, wasm_code);
 
  //----------------------------------------------------------------------------
  // Run the compilation pipeline.
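
[Reviewer note, not part of the patch: the mem_mask conditioning kept intact in these hunks, and the neg_key/masked_diff sequence in CallIndirect earlier, are the same branchless Spectre (SSCA) mitigation. A scalar C++ restatement of the Word32Xor / Int32Sub / Word32And / Word32Sar node pattern, assuming sizes stay below 2^31 as wasm memory and table limits guarantee:]

    #include <cstdint>

    // Returns key unchanged when key < size, and 0 otherwise, with no branch
    // the CPU could speculate past; the real bounds check still traps.
    uint32_t MaskIndex(uint32_t key, uint32_t size) {
      uint32_t neg_key = key ^ 0xFFFFFFFFu;           // Word32Xor(key, -1)
      uint32_t masked_diff = (key - size) & neg_key;  // sign bit set iff in bounds
      uint32_t mask =
          static_cast<uint32_t>(static_cast<int32_t>(masked_diff) >> 31);
      return key & mask;                              // Word32And
    }
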
@@ -4778,10 +4744,10 @@ void ValidateImportWrapperReferencesImmovables(Handle<Code> wrapper) {
 }  // namespace
 
-Handle<Code> CompileWasmToJSWrapper(
-    Isolate* isolate, Handle<JSReceiver> target, wasm::FunctionSig* sig,
-    uint32_t index, wasm::ModuleOrigin origin, bool use_trap_handler,
-    Handle<FixedArray> global_js_imports_table) {
+Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
+                                    wasm::FunctionSig* sig, uint32_t index,
+                                    wasm::ModuleOrigin origin,
+                                    bool use_trap_handler) {
  //----------------------------------------------------------------------------
  // Create the Graph
  //----------------------------------------------------------------------------
@@ -4807,19 +4773,8 @@ Handle<Code> CompileWasmToJSWrapper(
                                   source_position_table);
  builder.set_control_ptr(&control);
  builder.set_effect_ptr(&effect);
-  if (builder.BuildWasmToJSWrapper(target, global_js_imports_table, index)) {
-    global_js_imports_table->set(
-        OffsetForImportData(index, WasmGraphBuilder::kFunction), *target);
-    if (target->IsJSFunction()) {
-      Handle<JSFunction> function = Handle<JSFunction>::cast(target);
-      global_js_imports_table->set(
-          OffsetForImportData(index, WasmGraphBuilder::kFunctionContext),
-          function->context());
-      global_js_imports_table->set(
-          OffsetForImportData(index, WasmGraphBuilder::kGlobalProxy),
-          function->context()->global_proxy());
-    }
-  }
+  builder.BuildWasmToJSWrapper(target, index);
+
  if (FLAG_trace_turbo_graph) {  // Simple textual RPO.
    OFStream os(stdout);
    os << "-- Graph after change lowering -- " << std::endl;
    os << AsRPO(graph);
  }
@@ -4844,14 +4799,7 @@ Handle<Code> CompileWasmToJSWrapper(
  Handle<Code> code = Pipeline::GenerateCodeForTesting(
      &info, isolate, incoming, &graph, nullptr, source_position_table);
  ValidateImportWrapperReferencesImmovables(code);
-  Handle<FixedArray> deopt_data = isolate->factory()->NewFixedArray(2, TENURED);
-  intptr_t loc = reinterpret_cast<intptr_t>(global_js_imports_table.location());
-  Handle<Object> loc_handle = isolate->factory()->NewHeapNumberFromBits(loc);
-  deopt_data->set(0, *loc_handle);
-  Handle<Object> index_handle = isolate->factory()->NewNumberFromInt(
-      OffsetForImportData(index, WasmGraphBuilder::kFunction));
-  deopt_data->set(1, *index_handle);
-  code->set_deoptimization_data(*deopt_data);
+
 #ifdef ENABLE_DISASSEMBLER
  if (FLAG_print_opt_code && !code.is_null()) {
    CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
@@ -4868,81 +4816,8 @@ Handle<Code> CompileWasmToJSWrapper(
  return code;
 }
 
-Handle<Code> CompileWasmToWasmWrapper(Isolate* isolate, wasm::WasmCode* target,
-                                      wasm::FunctionSig* sig,
-                                      Address new_wasm_context_address) {
-  //----------------------------------------------------------------------------
-  // Create the Graph
-  //----------------------------------------------------------------------------
-  Zone zone(isolate->allocator(), ZONE_NAME);
-  Graph graph(&zone);
-  CommonOperatorBuilder common(&zone);
-  MachineOperatorBuilder machine(
-      &zone, MachineType::PointerRepresentation(),
-      InstructionSelector::SupportedMachineOperatorFlags(),
-      InstructionSelector::AlignmentRequirements());
-  JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
-
-  Node* control = nullptr;
-  Node* effect = nullptr;
-
-  ModuleEnv env(nullptr, target->HasTrapHandlerIndex());
-  WasmGraphBuilder builder(&env, &zone, &jsgraph, Handle<Code>(), sig);
-  builder.set_control_ptr(&control);
-  builder.set_effect_ptr(&effect);
-  builder.BuildWasmToWasmWrapper(target, new_wasm_context_address);
-  if (HasInt64ParamOrReturn(sig)) builder.LowerInt64();
-
-  if (FLAG_trace_turbo_graph) {  // Simple textual RPO.
-    OFStream os(stdout);
-    os << "-- Graph after change lowering -- " << std::endl;
-    os << AsRPO(graph);
-  }
-
-  // Schedule and compile to machine code.
-  CallDescriptor* incoming = GetWasmCallDescriptor(&zone, sig);
-  if (machine.Is32()) {
-    incoming = GetI32WasmCallDescriptor(&zone, incoming);
-  }
-  bool debugging =
-#if DEBUG
-      true;
-#else
-      FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
-#endif
-  Vector<const char> func_name = ArrayVector("wasm-to-wasm");
-  static unsigned id = 0;
-  Vector<char> buffer;
-  if (debugging) {
-    buffer = Vector<char>::New(128);
-    int chars = SNPrintF(buffer, "wasm-to-wasm#%d", id);
-    func_name = Vector<const char>::cast(buffer.SubVector(0, chars));
-  }
-
-  CompilationInfo info(func_name, &zone, Code::WASM_TO_WASM_FUNCTION);
-  Handle<Code> code =
-      Pipeline::GenerateCodeForTesting(&info, isolate, incoming, &graph);
-#ifdef ENABLE_DISASSEMBLER
-  if (FLAG_print_opt_code && !code.is_null()) {
-    CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
-    OFStream os(tracing_scope.file());
-    code->Disassemble(buffer.start(), os);
-  }
-#endif
-  if (debugging) {
-    buffer.Dispose();
-  }
-  if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
-    RecordFunctionCompilation(CodeEventListener::STUB_TAG, isolate, code,
-                              "wasm-to-wasm");
-  }
-
-  return code;
-}
-
 Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
-                                         wasm::FunctionSig* sig,
-                                         Handle<WasmInstanceObject> instance) {
+                                         wasm::FunctionSig* sig) {
  //----------------------------------------------------------------------------
  // Create the Graph
  //----------------------------------------------------------------------------
@@ -5066,6 +4941,11 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
  return code;
 }
 
+int FixedArrayOffsetMinusTag(uint32_t index) {
+  auto access = AccessBuilder::ForFixedArraySlot(index);
+  return access.offset - access.tag();
+}
+
 SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
    double* decode_ms) {
  base::ElapsedTimer decode_timer;
@@ -5381,6 +5261,9 @@ wasm::WasmCode* WasmCompilationUnit::CompileWasmFunction(
 
 #undef WASM_64
 #undef FATAL_UNSUPPORTED_OPCODE
+#undef WASM_INSTANCE_OBJECT_OFFSET
+#undef LOAD_INSTANCE_FIELD
+#undef LOAD_FIXED_ARRAY_SLOT
 
 }  // namespace compiler
 }  // namespace internal
diff --git a/src/compiler/wasm-compiler.h b/src/compiler/wasm-compiler.h
index 41d4612de1..0fe0c7984d 100644
--- a/src/compiler/wasm-compiler.h
+++ b/src/compiler/wasm-compiler.h
@@ -43,34 +43,12 @@ class WasmCode;
 
 namespace compiler {
 
-// Indirect function tables contain a pair for each entry.
-enum FunctionTableEntries : int {
-  kFunctionTableSignatureOffset = 0,
-  kFunctionTableCodeOffset = 1,
-  kFunctionTableEntrySize = 2
-};
-constexpr inline int FunctionTableSigOffset(int i) {
-  return kFunctionTableEntrySize * i + kFunctionTableSignatureOffset;
-}
-constexpr inline int FunctionTableCodeOffset(int i) {
-  return kFunctionTableEntrySize * i + kFunctionTableCodeOffset;
-}
-
 // The {ModuleEnv} encapsulates the module data that is used by the
-// {WasmGraphBuilder} during graph building. It represents the parameters to
-// which the compiled code should be specialized, including which code to call
-// for direct calls {function_code}, which tables to use for indirect calls
-// {function_tables}, memory start address and size {mem_start, mem_size},
-// as well as the module itself {module}.
+// {WasmGraphBuilder} during graph building.
 // ModuleEnvs are shareable across multiple compilations.
 struct ModuleEnv {
  // A pointer to the decoded module's static representation.
  const wasm::WasmModule* module;
 
-  // The function tables are FixedArrays of pairs used to signature
-  // check and dispatch indirect calls. It has the same length as
-  // module.function_tables. We use the address to a global handle to the
-  // FixedArray.
-  const std::vector<Address> function_tables;
 
  // True if trap handling should be used in compiled code, rather than
  // compiling in bounds checks for each memory access.
@@ -78,12 +56,6 @@ struct ModuleEnv {
 
  ModuleEnv(const wasm::WasmModule* module, bool use_trap_handler)
      : module(module), use_trap_handler(use_trap_handler) {}
-
-  ModuleEnv(const wasm::WasmModule* module,
-            std::vector<Address> function_tables, bool use_trap_handler)
-      : module(module),
-        function_tables(std::move(function_tables)),
-        use_trap_handler(use_trap_handler) {}
 };
 
 enum RuntimeExceptionSupport : bool {
@@ -183,35 +155,28 @@ class WasmCompilationUnit final {
 };
 
 // Wraps a JS function, producing a code object that can be called from wasm.
-// The global_js_imports_table is a global handle to a fixed array of target
-// JSReceiver with the lifetime tied to the module. We store it's location (non
-// GCable) in the generated code so that it can reside outside of GCed heap.
 Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
                                    wasm::FunctionSig* sig, uint32_t index,
                                    wasm::ModuleOrigin origin,
-                                    bool use_trap_handler,
-                                    Handle<FixedArray> global_js_imports_table);
+                                    bool use_trap_handler);
 
 // Wraps a given wasm code object, producing a code object.
 V8_EXPORT_PRIVATE Handle<Code> CompileJSToWasmWrapper(
-    Isolate* isolate, wasm::WasmModule* module, wasm::WasmCode* wasm_code,
-    uint32_t index, Address wasm_context_address, bool use_trap_handler);
-
-// Wraps a wasm function, producing a code object that can be called from other
-// wasm instances (the WasmContext address must be changed).
-Handle<Code> CompileWasmToWasmWrapper(Isolate* isolate, wasm::WasmCode* target,
-                                      wasm::FunctionSig* sig,
-                                      Address new_wasm_context_address);
+    Isolate* isolate, wasm::WasmModule* module, Handle<WeakCell> weak_instance,
+    wasm::WasmCode* wasm_code, uint32_t index, bool use_trap_handler);
 
 // Compiles a stub that redirects a call to a wasm function to the wasm
 // interpreter. It's ABI compatible with the compiled wasm function.
 Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
-                                         wasm::FunctionSig* sig,
-                                         Handle<WasmInstanceObject> instance);
+                                         wasm::FunctionSig* sig);
+
+// Helper function to get the offset into a fixed array for a given {index}.
+// TODO(titzer): access-builder.h is not accessible outside compiler. Move?
+int FixedArrayOffsetMinusTag(uint32_t index);
 
 enum CWasmEntryParameters {
  kCodeObject,
-  kWasmContext,
+  kWasmInstance,
  kArgumentsBuffer,
  // marker:
  kNumParameters
@@ -222,12 +187,11 @@ enum CWasmEntryParameters {
 // buffer and calls the wasm function given as first parameter.
 Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig);
 
-// Values from the {WasmContext} are cached between WASM-level function calls.
+// Values from the instance object are cached between WASM-level function calls.
 // This struct allows the SSA environment handling this cache to be defined
 // and manipulated in wasm-compiler.{h,cc} instead of inside the WASM decoder.
-// (Note that currently, the globals base is immutable in a context, so not
-// cached here.)
-struct WasmContextCacheNodes {
+// (Note that currently, the globals base is immutable, so not cached here.)
+struct WasmInstanceCacheNodes {
  Node* mem_start;
  Node* mem_size;
  Node* mem_mask;
@@ -335,29 +299,16 @@ class WasmGraphBuilder {
  Node* CallIndirect(uint32_t index, Node** args, Node*** rets,
                     wasm::WasmCodePosition position);
 
-  void BuildJSToWasmWrapper(wasm::WasmCode* wasm_code_start,
-                            Address wasm_context_address);
-  enum ImportDataType {
-    kFunction = 1,
-    kGlobalProxy = 2,
-    kFunctionContext = 3,
-  };
-  Node* LoadImportDataAtOffset(int offset, Node* table);
-  Node* LoadNativeContext(Node* table);
-  Node* LoadImportData(int index, ImportDataType type, Node* table);
+  void BuildJSToWasmWrapper(Handle<WeakCell> weak_instance,
+                            wasm::WasmCode* wasm_code);
  bool BuildWasmToJSWrapper(Handle<JSReceiver> target,
-                            Handle<FixedArray> global_js_imports_table,
                            int index);
-  void BuildWasmToWasmWrapper(wasm::WasmCode* wasm_code_start,
-                              Address new_wasm_context_address);
  void BuildWasmInterpreterEntry(uint32_t func_index);
  void BuildCWasmEntry();
 
  Node* ToJS(Node* node, wasm::ValueType type);
  Node* FromJS(Node* node, Node* js_context, wasm::ValueType type);
  Node* Invert(Node* node);
-  void GetFunctionTableNodes(uint32_t table_index, Node** table,
-                             Node** table_size);
 
  //-----------------------------------------------------------------------
  // Operations that concern the linear memory.
@@ -375,8 +326,8 @@ class WasmGraphBuilder {
                 wasm::ValueType type);
  static void PrintDebugName(Node* node);
 
-  void set_wasm_context(Node* wasm_context) {
-    this->wasm_context_ = wasm_context;
+  void set_instance_node(Node* instance_node) {
+    this->instance_node_ = instance_node;
  }
 
  Node* Control() { return *control_; }
@@ -389,17 +340,17 @@
  void GetGlobalBaseAndOffset(MachineType mem_type, uint32_t offset,
                              Node** base_node, Node** offset_node);
 
-  // Utilities to manipulate sets of context cache nodes.
-  void InitContextCache(WasmContextCacheNodes* context_cache);
-  void PrepareContextCacheForLoop(WasmContextCacheNodes* context_cache,
-                                  Node* control);
-  void NewContextCacheMerge(WasmContextCacheNodes* to,
-                            WasmContextCacheNodes* from, Node* merge);
-  void MergeContextCacheInto(WasmContextCacheNodes* to,
-                             WasmContextCacheNodes* from, Node* merge);
+  // Utilities to manipulate sets of instance cache nodes.
+  void InitInstanceCache(WasmInstanceCacheNodes* instance_cache);
+  void PrepareInstanceCacheForLoop(WasmInstanceCacheNodes* instance_cache,
+                                   Node* control);
+  void NewInstanceCacheMerge(WasmInstanceCacheNodes* to,
+                             WasmInstanceCacheNodes* from, Node* merge);
+  void MergeInstanceCacheInto(WasmInstanceCacheNodes* to,
+                              WasmInstanceCacheNodes* from, Node* merge);
 
-  void set_context_cache(WasmContextCacheNodes* context_cache) {
-    this->context_cache_ = context_cache;
+  void set_instance_cache(WasmInstanceCacheNodes* instance_cache) {
+    this->instance_cache_ = instance_cache;
  }
 
  wasm::FunctionSig* GetFunctionSignature() { return sig_; }
@@ -446,15 +397,14 @@
  // env_ == nullptr means we're not compiling Wasm functions, such as for
  // wrappers or interpreter stubs.
  ModuleEnv* const env_ = nullptr;
-  SetOncePointer<Node> wasm_context_;
+  SetOncePointer<Node> instance_node_;
  struct FunctionTableNodes {
    Node* table_addr;
    Node* size;
  };
-  ZoneVector<FunctionTableNodes> function_tables_;
  Node** control_ = nullptr;
  Node** effect_ = nullptr;
-  WasmContextCacheNodes* context_cache_ = nullptr;
+  WasmInstanceCacheNodes* instance_cache_ = nullptr;
  SetOncePointer<Node> globals_start_;
  Node** cur_buffer_;
  size_t cur_bufsize_;
@@ -492,7 +442,8 @@
  Node* BuildCCall(MachineSignature* sig, Node* function, Args... args);
  Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args, Node*** rets,
                      wasm::WasmCodePosition position,
-                      Node* wasm_context = nullptr, bool use_retpoline = false);
+                      Node* instance_node = nullptr,
+                      bool use_retpoline = false);
 
  Node* BuildF32CopySign(Node* left, Node* right);
  Node* BuildF64CopySign(Node* left, Node* right);
@@ -610,9 +561,9 @@
  Builtins::Name GetBuiltinIdForTrap(wasm::TrapReason reason);
 };
 
-// The parameter index where the wasm_context paramter should be placed in wasm
+// The parameter index where the instance parameter should be placed in wasm
 // call descriptors. This is used by the Int64Lowering::LowerNode method.
-constexpr int kWasmContextParameterIndex = 0;
+constexpr int kWasmInstanceParameterIndex = 0;
 
 V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(
    Zone* zone, wasm::FunctionSig* signature, bool use_retpoline = false);
diff --git a/src/compiler/wasm-linkage.cc b/src/compiler/wasm-linkage.cc
index 442a6c678a..76a7ea58f6 100644
--- a/src/compiler/wasm-linkage.cc
+++ b/src/compiler/wasm-linkage.cc
@@ -32,6 +32,8 @@ MachineType MachineTypeFor(ValueType type) {
      return MachineType::Float32();
    case wasm::kWasmS128:
      return MachineType::Simd128();
+    case wasm::kWasmAnyRef:
+      return MachineType::TaggedPointer();
    default:
      UNREACHABLE();
  }
@@ -225,15 +227,15 @@ static constexpr Allocator parameter_registers(kGPParamRegisters,
 
 // General code uses the above configuration data.
 CallDescriptor* GetWasmCallDescriptor(Zone* zone, wasm::FunctionSig* fsig,
                                      bool use_retpoline) {
-  // The '+ 1' here is to accomodate the wasm_context as first parameter.
+  // The '+ 1' here is to accommodate the instance object as first parameter.
  LocationSignature::Builder locations(zone, fsig->return_count(),
                                       fsig->parameter_count() + 1);
 
  // Add register and/or stack parameter(s).
  Allocator params = parameter_registers;
 
-  // The wasm_context.
-  locations.AddParam(params.Next(MachineType::PointerRepresentation()));
+  // The instance object.
+  locations.AddParam(params.Next(MachineRepresentation::kTaggedPointer));
 
  const int parameter_count = static_cast<int>(fsig->parameter_count());
  for (int i = 0; i < parameter_count; i++) {
diff --git a/src/elements.h b/src/elements.h
index a2b8b49c93..5ca8bc8702 100644
--- a/src/elements.h
+++ b/src/elements.h
@@ -137,8 +137,8 @@ class ElementsAccessor {
  virtual uint32_t Push(Handle<JSArray> receiver, Arguments* args,
                        uint32_t push_size) = 0;
 
-  virtual uint32_t Unshift(Handle<JSArray> receiver,
-                           Arguments* args, uint32_t unshift_size) = 0;
+  virtual uint32_t Unshift(Handle<JSArray> receiver, Arguments* args,
+                           uint32_t unshift_size) = 0;
 
  virtual Handle<JSObject> Slice(Handle<JSObject> receiver, uint32_t start,
                                 uint32_t end) = 0;
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index a7257d016c..4b3d194291 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -1140,8 +1140,6 @@ void JSFunction::JSFunctionPrint(std::ostream& os) {  // NOLINT
    WasmExportedFunction* function = WasmExportedFunction::cast(this);
    os << "\n - WASM instance "
       << reinterpret_cast<void*>(function->instance());
-    os << "\n context "
-       << reinterpret_cast<void*>(function->instance()->wasm_context()->get());
    os << "\n - WASM function index " << function->function_index();
  }
  shared()->PrintSourceCode(os);
diff --git a/src/objects.cc b/src/objects.cc
index af1174f799..d2bfe41fdb 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -14226,7 +14226,6 @@ bool Code::IsProcessIndependent() {
      mode_mask ==
          (RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
           RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
-           RelocInfo::ModeMask(RelocInfo::WASM_CONTEXT_REFERENCE) |
           RelocInfo::ModeMask(RelocInfo::WASM_GLOBAL_HANDLE) |
           RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
           RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL) |
diff --git a/src/runtime/runtime-wasm.cc b/src/runtime/runtime-wasm.cc
index 06e0dfe6c8..650570dabb 100644
--- a/src/runtime/runtime-wasm.cc
+++ b/src/runtime/runtime-wasm.cc
@@ -37,6 +37,7 @@ WasmInstanceObject* GetWasmInstanceOnStackTop(Isolate* isolate) {
  return owning_instance;
 }
 
+// TODO(titzer): rename to GetNativeContextFromWasmInstanceOnStackTop()
 Context* GetWasmContextOnStackTop(Isolate* isolate) {
  return GetWasmInstanceOnStackTop(isolate)
      ->compiled_module()
diff --git a/src/wasm/baseline/arm/liftoff-assembler-arm.h b/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 36a2622639..039c9d7cad 100644
--- a/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -28,17 +28,17 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
  BAILOUT("LoadConstant");
 }
 
-void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
-                                       int size) {
-  BAILOUT("LoadFromContext");
+void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
+                                        int size) {
+  BAILOUT("LoadFromInstance");
 }
 
-void LiftoffAssembler::SpillContext(Register context) {
-  BAILOUT("SpillContext");
+void LiftoffAssembler::SpillInstance(Register instance) {
+  BAILOUT("SpillInstance");
 }
 
-void LiftoffAssembler::FillContextInto(Register dst) {
-  BAILOUT("FillContextInto");
+void LiftoffAssembler::FillInstanceInto(Register dst) {
+  BAILOUT("FillInstanceInto");
 }
 
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
diff --git a/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index bc041f97eb..3419886cb4 100644
--- a/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -28,17 +28,17 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
  BAILOUT("LoadConstant");
 }
 
-void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
-                                       int size) {
-  BAILOUT("LoadFromContext");
+void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
+                                        int size) {
+  BAILOUT("LoadFromInstance");
 }
 
-void LiftoffAssembler::SpillContext(Register context) {
-  BAILOUT("SpillContext");
+void LiftoffAssembler::SpillInstance(Register instance) {
+  BAILOUT("SpillInstance");
 }
 
-void LiftoffAssembler::FillContextInto(Register dst) {
-  BAILOUT("FillContextInto");
+void LiftoffAssembler::FillInstanceInto(Register dst) {
+  BAILOUT("FillInstanceInto");
 }
 
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
diff --git a/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 1791c90c12..fec0ad18e0 100644
--- a/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -16,8 +16,8 @@ namespace wasm {
 
 namespace liftoff {
 
-// ebp-8 holds the stack marker, ebp-16 is the wasm context, first stack slot
-// is located at ebp-24.
+// ebp-8 holds the stack marker, ebp-16 is the instance parameter, first stack
+// slot is located at ebp-24.
 constexpr int32_t kConstantStackSpace = 16;
 constexpr int32_t kFirstStackSlotOffset =
    kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
@@ -33,7 +33,7 @@ inline Operand GetHalfStackSlot(uint32_t half_index) {
 }
 
 // TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
-inline Operand GetContextOperand() { return Operand(ebp, -16); }
+inline Operand GetInstanceOperand() { return Operand(ebp, -16); }
 
 static constexpr LiftoffRegList kByteRegs =
    LiftoffRegList::FromBits<Register::ListOf<eax, ecx, edx>()>();
@@ -133,20 +133,20 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
  }
 }
 
-void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
-                                       int size) {
+void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
+                                        int size) {
  DCHECK_LE(offset, kMaxInt);
-  mov(dst, liftoff::GetContextOperand());
+  mov(dst, liftoff::GetInstanceOperand());
  DCHECK_EQ(4, size);
  mov(dst, Operand(dst, offset));
 }
 
-void LiftoffAssembler::SpillContext(Register context) {
-  mov(liftoff::GetContextOperand(), context);
+void LiftoffAssembler::SpillInstance(Register instance) {
+  mov(liftoff::GetInstanceOperand(), instance);
 }
 
-void LiftoffAssembler::FillContextInto(Register dst) {
-  mov(dst, liftoff::GetContextOperand());
+void LiftoffAssembler::FillInstanceInto(Register dst) {
+  mov(dst, liftoff::GetInstanceOperand());
 }
 
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
@@ -1182,7 +1182,7 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) {
 }
 
 void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
-  // Set context to zero.
+  // Set instance to zero.
  xor_(esi, esi);
  CallRuntimeDelayed(zone, fid);
 }
diff --git a/src/wasm/baseline/liftoff-assembler.cc b/src/wasm/baseline/liftoff-assembler.cc
index 9f3a5063ad..9f910d59fc 100644
--- a/src/wasm/baseline/liftoff-assembler.cc
+++ b/src/wasm/baseline/liftoff-assembler.cc
@@ -437,7 +437,7 @@ void LiftoffAssembler::SpillAllRegisters() {
 void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
                                   compiler::CallDescriptor* call_descriptor,
                                   Register* target,
-                                   LiftoffRegister* explicit_context) {
+                                   LiftoffRegister* target_instance) {
  uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
  // Input 0 is the call target.
constexpr size_t kInputShift = 1; @@ -455,14 +455,14 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig, StackTransferRecipe stack_transfers(this); LiftoffRegList param_regs; - // Move the explicit context (if any) into the correct context register. - compiler::LinkageLocation context_loc = + // Move the target instance (if supplied) into the correct instance register. + compiler::LinkageLocation instance_loc = call_descriptor->GetInputLocation(kInputShift); - DCHECK(context_loc.IsRegister() && !context_loc.IsAnyRegister()); - LiftoffRegister context_reg(Register::from_code(context_loc.AsRegister())); - param_regs.set(context_reg); - if (explicit_context && *explicit_context != context_reg) { - stack_transfers.MoveRegister(context_reg, *explicit_context, kWasmIntPtr); + DCHECK(instance_loc.IsRegister() && !instance_loc.IsAnyRegister()); + LiftoffRegister instance_reg(Register::from_code(instance_loc.AsRegister())); + param_regs.set(instance_reg); + if (target_instance && *target_instance != instance_reg) { + stack_transfers.MoveRegister(instance_reg, *target_instance, kWasmIntPtr); } // Now move all parameter values into the right slot for the call. @@ -504,7 +504,7 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig, } } } - // {call_desc_input_idx} should point after the context parameter now. + // {call_desc_input_idx} should point after the instance parameter now. DCHECK_EQ(call_desc_input_idx, kInputShift + 1); // If the target register overlaps with a parameter register, then move the @@ -523,7 +523,7 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig, } } - // Execute the stack transfers before filling the context register. + // Execute the stack transfers before filling the instance register. stack_transfers.Execute(); // Pop parameters from the value stack. @@ -533,9 +533,9 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig, // Reset register use counters. cache_state_.reset_used_registers(); - // Reload the context from the stack. - if (!explicit_context) { - FillContextInto(context_reg.gp()); + // Reload the instance from the stack. + if (!target_instance) { + FillInstanceInto(instance_reg.gp()); } } diff --git a/src/wasm/baseline/liftoff-assembler.h b/src/wasm/baseline/liftoff-assembler.h index 22b5575209..8859c9e5e2 100644 --- a/src/wasm/baseline/liftoff-assembler.h +++ b/src/wasm/baseline/liftoff-assembler.h @@ -322,7 +322,7 @@ class LiftoffAssembler : public TurboAssembler { // register, or {no_reg} if target was spilled to the stack. void PrepareCall(wasm::FunctionSig*, compiler::CallDescriptor*, Register* target = nullptr, - LiftoffRegister* explicit_context = nullptr); + LiftoffRegister* target_instance = nullptr); // Process return values of the call. 
void FinishCall(wasm::FunctionSig*, compiler::CallDescriptor*); @@ -352,9 +352,9 @@ class LiftoffAssembler : public TurboAssembler { inline void LoadConstant(LiftoffRegister, WasmValue, RelocInfo::Mode rmode = RelocInfo::NONE); - inline void LoadFromContext(Register dst, uint32_t offset, int size); - inline void SpillContext(Register context); - inline void FillContextInto(Register dst); + inline void LoadFromInstance(Register dst, uint32_t offset, int size); + inline void SpillInstance(Register instance); + inline void FillInstanceInto(Register dst); inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg, uint32_t offset_imm, LoadType type, LiftoffRegList pinned, uint32_t* protected_load_pc = nullptr); diff --git a/src/wasm/baseline/liftoff-compiler.cc b/src/wasm/baseline/liftoff-compiler.cc index 3927fc39f8..119cce3711 100644 --- a/src/wasm/baseline/liftoff-compiler.cc +++ b/src/wasm/baseline/liftoff-compiler.cc @@ -32,6 +32,18 @@ namespace { if (FLAG_trace_liftoff) PrintF("[liftoff] " __VA_ARGS__); \ } while (false) +#define WASM_INSTANCE_OBJECT_OFFSET(name) \ + (WasmInstanceObject::k##name##Offset - kHeapObjectTag) + +#define LOAD_INSTANCE_FIELD(dst, name, type) \ + __ LoadFromInstance(dst.gp(), WASM_INSTANCE_OBJECT_OFFSET(name), \ + LoadType(type).size()); + +#define FIXED_ARRAY_HEADER_SIZE (FixedArray::kHeaderSize - kHeapObjectTag) + +constexpr LoadType::LoadTypeValue kPointerLoadType = + kPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load; + #if V8_TARGET_ARCH_ARM64 // On ARM64, the Assembler keeps track of pointers to Labels to resolve // branches to distant targets. Moving labels would confuse the Assembler, @@ -270,24 +282,24 @@ class LiftoffCompiler { // finish compilation without errors even if we hit unimplemented // LiftoffAssembler methods. if (DidAssemblerBailout(decoder)) return; - // Parameter 0 is the wasm context. + // Parameter 0 is the instance parameter. uint32_t num_params = static_cast(decoder->sig_->parameter_count()); for (uint32_t i = 0; i < __ num_locals(); ++i) { if (!CheckSupportedType(decoder, kTypes_ilfd, __ local_type(i), "param")) return; } - // Input 0 is the call target, the context is at 1. - constexpr int kContextParameterIndex = 1; - // Store the context parameter to a special stack slot. - compiler::LinkageLocation context_loc = - descriptor_->GetInputLocation(kContextParameterIndex); - DCHECK(context_loc.IsRegister()); - DCHECK(!context_loc.IsAnyRegister()); - Register context_reg = Register::from_code(context_loc.AsRegister()); - __ SpillContext(context_reg); - // Input 0 is the code target, 1 is the context. First parameter at 2. - uint32_t input_idx = kContextParameterIndex + 1; + // Input 0 is the call target, the instance is at 1. + constexpr int kInstanceParameterIndex = 1; + // Store the instance parameter to a special stack slot. + compiler::LinkageLocation instance_loc = + descriptor_->GetInputLocation(kInstanceParameterIndex); + DCHECK(instance_loc.IsRegister()); + DCHECK(!instance_loc.IsAnyRegister()); + Register instance_reg = Register::from_code(instance_loc.AsRegister()); + __ SpillInstance(instance_reg); + // Input 0 is the code target, 1 is the instance. First parameter at 2. 
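The WASM_INSTANCE_OBJECT_OFFSET macro above subtracts kHeapObjectTag because V8 heap pointers carry a low tag bit: a field at byte offset k from the object start lives at tagged_pointer + k - tag, so the untagging can be folded into the load offset. A self-contained sketch of that arithmetic, with an invented field offset:

#include <cstdint>
#include <iostream>

constexpr uintptr_t kHeapObjectTag = 1;  // low bit set on heap-object pointers

int main() {
  uint32_t object[4] = {0, 0, 42, 0};  // pretend a field lives at offset 8
  uintptr_t tagged = reinterpret_cast<uintptr_t>(object) + kHeapObjectTag;
  constexpr uintptr_t kFieldOffset = 8;  // stand-in for k##name##Offset
  // WASM_INSTANCE_OBJECT_OFFSET(name) bakes the untagging into the offset:
  uint32_t value =
      *reinterpret_cast<uint32_t*>(tagged + kFieldOffset - kHeapObjectTag);
  std::cout << value << "\n";  // prints 42
}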
+ uint32_t input_idx = kInstanceParameterIndex + 1; for (uint32_t param_idx = 0; param_idx < num_params; ++param_idx) { input_idx += ProcessParameter(__ local_type(param_idx), input_idx); } @@ -940,13 +952,12 @@ class LiftoffCompiler { if (!CheckSupportedType(decoder, kTypes_ilfd, global->type, "global")) return; LiftoffRegList pinned; - Register addr = pinned.set(__ GetUnusedRegister(kGpReg)).gp(); - __ LoadFromContext(addr, offsetof(WasmContext, globals_start), - kPointerSize); + LiftoffRegister addr = pinned.set(__ GetUnusedRegister(kGpReg)); + LOAD_INSTANCE_FIELD(addr, GlobalsStart, kPointerLoadType); LiftoffRegister value = pinned.set(__ GetUnusedRegister(reg_class_for(global->type), pinned)); LoadType type = LoadType::ForValueType(global->type); - __ Load(value, addr, no_reg, global->offset, type, pinned); + __ Load(value, addr.gp(), no_reg, global->offset, type, pinned); __ PushRegister(global->type, value); } @@ -956,12 +967,11 @@ class LiftoffCompiler { if (!CheckSupportedType(decoder, kTypes_ilfd, global->type, "global")) return; LiftoffRegList pinned; - Register addr = pinned.set(__ GetUnusedRegister(kGpReg)).gp(); - __ LoadFromContext(addr, offsetof(WasmContext, globals_start), - kPointerSize); + LiftoffRegister addr = pinned.set(__ GetUnusedRegister(kGpReg)); + LOAD_INSTANCE_FIELD(addr, GlobalsStart, kPointerLoadType); LiftoffRegister reg = pinned.set(__ PopToRegister(pinned)); StoreType type = StoreType::ForValueType(global->type); - __ Store(addr, no_reg, global->offset, reg, type, pinned); + __ Store(addr.gp(), no_reg, global->offset, reg, type, pinned); } void Unreachable(Decoder* decoder) { unsupported(decoder, "unreachable"); } @@ -1116,7 +1126,7 @@ class LiftoffCompiler { LiftoffRegister end_offset_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); LiftoffRegister mem_size = __ GetUnusedRegister(kGpReg, pinned); - __ LoadFromContext(mem_size.gp(), offsetof(WasmContext, mem_size), 4); + LOAD_INSTANCE_FIELD(mem_size, MemorySize, LoadType::kI32Load); __ LoadConstant(end_offset_reg, WasmValue(end_offset)); if (end_offset >= min_size_) { __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kWasmI32, @@ -1207,12 +1217,12 @@ class LiftoffCompiler { if (BoundsCheckMem(decoder, type.size(), operand.offset, index, pinned)) { return; } - Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); - __ LoadFromContext(addr, offsetof(WasmContext, mem_start), kPointerSize); + LiftoffRegister addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); + LOAD_INSTANCE_FIELD(addr, MemoryStart, kPointerLoadType); RegClass rc = reg_class_for(value_type); LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned)); uint32_t protected_load_pc = 0; - __ Load(value, addr, index, operand.offset, type, pinned, + __ Load(value, addr.gp(), index, operand.offset, type, pinned, &protected_load_pc); if (env_->use_trap_handler) { AddOutOfLineTrap(decoder->position(), @@ -1238,10 +1248,10 @@ class LiftoffCompiler { if (BoundsCheckMem(decoder, type.size(), operand.offset, index, pinned)) { return; } - Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); - __ LoadFromContext(addr, offsetof(WasmContext, mem_start), kPointerSize); + LiftoffRegister addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); + LOAD_INSTANCE_FIELD(addr, MemoryStart, kPointerLoadType); uint32_t protected_store_pc = 0; - __ Store(addr, index, operand.offset, value, type, pinned, + __ Store(addr.gp(), index, operand.offset, value, type, pinned, &protected_store_pc); if 
(env_->use_trap_handler) { AddOutOfLineTrap(decoder->position(), @@ -1276,19 +1286,55 @@ class LiftoffCompiler { call_descriptor = GetLoweredCallDescriptor(compilation_zone_, call_descriptor); - __ PrepareCall(operand.sig, call_descriptor); + if (operand.index < env_->module->num_imported_functions) { + // A direct call to an imported function. + LiftoffRegList pinned; + LiftoffRegister tmp = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); + LiftoffRegister target = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); - source_position_table_builder_->AddPosition( - __ pc_offset(), SourcePosition(decoder->position()), false); + LiftoffRegister imported_targets = tmp; + LOAD_INSTANCE_FIELD(imported_targets, ImportedFunctionTargets, + kPointerLoadType); + __ Load(target, imported_targets.gp(), no_reg, + operand.index * sizeof(Address), kPointerLoadType, pinned); - // Just encode the function index. This will be patched at instantiation. - Address addr = reinterpret_cast
<Address>(operand.index); - __ CallNativeWasmCode(addr); + LiftoffRegister imported_instances = tmp; + LOAD_INSTANCE_FIELD(imported_instances, ImportedFunctionInstances, + kPointerLoadType); + LiftoffRegister target_instance = tmp; + __ Load(target_instance, imported_instances.gp(), no_reg, + compiler::FixedArrayOffsetMinusTag(operand.index), + kPointerLoadType, pinned); - safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0, - Safepoint::kNoLazyDeopt); + LiftoffRegister* explicit_instance = &target_instance; + Register target_reg = target.gp(); + __ PrepareCall(operand.sig, call_descriptor, &target_reg, + explicit_instance); + source_position_table_builder_->AddPosition( + __ pc_offset(), SourcePosition(decoder->position()), false); - __ FinishCall(operand.sig, call_descriptor); + __ CallIndirect(operand.sig, call_descriptor, target_reg); + + safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0, + Safepoint::kNoLazyDeopt); + + __ FinishCall(operand.sig, call_descriptor); + } else { + // A direct call within this module just gets the current instance. + __ PrepareCall(operand.sig, call_descriptor); + + source_position_table_builder_->AddPosition( + __ pc_offset(), SourcePosition(decoder->position()), false); + + // Just encode the function index. This will be patched at instantiation. + Address addr = reinterpret_cast<Address>
(operand.index); + __ CallNativeWasmCode(addr); + + safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0, + Safepoint::kNoLazyDeopt); + + __ FinishCall(operand.sig, call_descriptor); + } } void CallIndirect(Decoder* decoder, const Value& index_val, @@ -1321,37 +1367,31 @@ class LiftoffCompiler { pinned.set(__ GetUnusedRegister(kGpReg, pinned)); LiftoffRegister scratch = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); - LiftoffRegister* explicit_context = nullptr; - // Bounds check against the table size. Label* invalid_func_label = AddOutOfLineTrap( decoder->position(), Builtins::kThrowWasmTrapFuncInvalid); - static constexpr LoadType kPointerLoadType = - kPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load; - uint32_t canonical_sig_num = env_->module->signature_ids[operand.sig_index]; DCHECK_GE(canonical_sig_num, 0); DCHECK_GE(kMaxInt, canonical_sig_num); - // Compare against table size stored in {wasm_context->table_size}. - __ LoadFromContext(tmp_const.gp(), offsetof(WasmContext, table_size), - sizeof(uint32_t)); + // Compare against table size stored in + // {instance->indirect_function_table_size}. + LOAD_INSTANCE_FIELD(tmp_const, IndirectFunctionTableSize, + LoadType::kI32Load); __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kWasmI32, index.gp(), tmp_const.gp()); - // Load the table from {wasm_context->table} - __ LoadFromContext(table.gp(), offsetof(WasmContext, table), kPointerSize); - // Load the signature from {wasm_context->table[$index].sig_id} - // == wasm_context.table + $index * #sizeof(IndirectionFunctionTableEntry) - // + #offsetof(sig_id) - __ LoadConstant( - tmp_const, - WasmValue(static_cast(sizeof(IndirectFunctionTableEntry)))); - __ emit_i32_mul(index.gp(), index.gp(), tmp_const.gp()); - __ Load(scratch, table.gp(), index.gp(), - offsetof(IndirectFunctionTableEntry, sig_id), LoadType::kI32Load, - pinned); + // Load the signature from {instance->ift_sig_ids[key]} + LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds, kPointerLoadType); + __ LoadConstant(tmp_const, + WasmValue(static_cast(sizeof(uint32_t)))); + // TODO(wasm): use a emit_i32_shli() instead of a multiply. + // (currently cannot use shl on ia32/x64 because it clobbers %rcx). + __ emit_i32_mul(index.gp(), index.gp(), tmp_const.gp()); + __ Load(scratch, table.gp(), index.gp(), 0, LoadType::kI32Load, pinned); + + // Compare against expected signature. __ LoadConstant(tmp_const, WasmValue(canonical_sig_num)); Label* sig_mismatch_label = AddOutOfLineTrap( @@ -1360,18 +1400,22 @@ class LiftoffCompiler { LiftoffAssembler::kWasmIntPtr, scratch.gp(), tmp_const.gp()); - // Load the target address from {wasm_context->table[$index].target} - __ Load(scratch, table.gp(), index.gp(), - offsetof(IndirectFunctionTableEntry, target), kPointerLoadType, - pinned); + if (kPointerSize == 8) { + // {index} has already been multiplied by 4. Multiply by another 2. + __ LoadConstant(tmp_const, WasmValue(2)); + __ emit_i32_mul(index.gp(), index.gp(), tmp_const.gp()); + } - // Load the context from {wasm_context->table[$index].context} - // TODO(wasm): directly allocate the correct context register to avoid - // any potential moves. 
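The rewritten CallIndirect in this hunk dispatches through three parallel per-instance tables (signature ids, code targets, and target instances), all indexed by the same table key, and the callee runs against the instance stored next to its target. A standalone C++ model of that sequence, with invented types and the out-of-line traps reduced to exceptions:

#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <vector>

struct Instance;
using WasmFn = int (*)(Instance*, int);

struct Instance {
  // Parallel indirect-function-table columns, as in the diff:
  std::vector<uint32_t> ift_sig_ids;
  std::vector<WasmFn> ift_targets;
  std::vector<Instance*> ift_instances;
};

int CallIndirect(Instance* caller, uint32_t index, uint32_t expected_sig,
                 int arg) {
  if (index >= caller->ift_sig_ids.size()) throw std::runtime_error("bounds");
  if (caller->ift_sig_ids[index] != expected_sig)
    throw std::runtime_error("sig mismatch");
  // The callee runs against *its own* instance, not the caller's:
  return caller->ift_targets[index](caller->ift_instances[index], arg);
}

int Square(Instance*, int x) { return x * x; }

int main() {
  Instance inst{{7}, {Square}, {&inst}};
  std::cout << CallIndirect(&inst, 0, 7, 6) << "\n";  // prints 36
}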
- __ Load(tmp_const, table.gp(), index.gp(), - offsetof(IndirectFunctionTableEntry, context), kPointerLoadType, - pinned); - explicit_context = &tmp_const; + // Load the target from {instance->ift_targets[key]} + LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets, kPointerLoadType); + __ Load(scratch, table.gp(), index.gp(), 0, kPointerLoadType, pinned); + + // Load the instance from {instance->ift_instances[key]} + LOAD_INSTANCE_FIELD(table, IndirectFunctionTableInstances, + kPointerLoadType); + __ Load(tmp_const, table.gp(), index.gp(), FIXED_ARRAY_HEADER_SIZE, + kPointerLoadType, pinned); + LiftoffRegister* explicit_instance = &tmp_const; source_position_table_builder_->AddPosition( __ pc_offset(), SourcePosition(decoder->position()), false); @@ -1382,7 +1426,7 @@ class LiftoffCompiler { GetLoweredCallDescriptor(compilation_zone_, call_descriptor); Register target = scratch.gp(); - __ PrepareCall(operand.sig, call_descriptor, &target, explicit_context); + __ PrepareCall(operand.sig, call_descriptor, &target, explicit_instance); __ CallIndirect(operand.sig, call_descriptor, target); safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0, @@ -1518,6 +1562,9 @@ bool compiler::WasmCompilationUnit::ExecuteLiftoffCompilation() { #undef __ #undef TRACE +#undef WASM_INSTANCE_OBJECT_OFFSET +#undef LOAD_INSTANCE_FIELD +#undef FIXED_ARRAY_HEADER_SIZE } // namespace internal } // namespace v8 diff --git a/src/wasm/baseline/mips/liftoff-assembler-mips.h b/src/wasm/baseline/mips/liftoff-assembler-mips.h index 57920073b1..df40eeb5a0 100644 --- a/src/wasm/baseline/mips/liftoff-assembler-mips.h +++ b/src/wasm/baseline/mips/liftoff-assembler-mips.h @@ -15,8 +15,8 @@ namespace wasm { namespace liftoff { -// fp-8 holds the stack marker, fp-16 is the wasm context, first stack slot -// is located at fp-24. +// fp-8 holds the stack marker, fp-16 is the instance parameter, first stack +// slot is located at fp-24. constexpr int32_t kConstantStackSpace = 16; constexpr int32_t kFirstStackSlotOffset = kConstantStackSpace + LiftoffAssembler::kStackSlotSize; @@ -31,7 +31,7 @@ inline MemOperand GetHalfStackSlot(uint32_t half_index) { return MemOperand(fp, -kFirstStackSlotOffset - offset); } -inline MemOperand GetContextOperand() { return MemOperand(fp, -16); } +inline MemOperand GetInstanceOperand() { return MemOperand(fp, -16); } // Use this register to store the address of the last argument pushed on the // stack for a call to C. 
This register must be callee saved according to the c @@ -129,20 +129,20 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, } } -void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset, - int size) { +void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset, + int size) { DCHECK_LE(offset, kMaxInt); - lw(dst, liftoff::GetContextOperand()); + lw(dst, liftoff::GetInstanceOperand()); DCHECK_EQ(4, size); lw(dst, MemOperand(dst, offset)); } -void LiftoffAssembler::SpillContext(Register context) { - sw(context, liftoff::GetContextOperand()); +void LiftoffAssembler::SpillInstance(Register instance) { + sw(instance, liftoff::GetInstanceOperand()); } -void LiftoffAssembler::FillContextInto(Register dst) { - lw(dst, liftoff::GetContextOperand()); +void LiftoffAssembler::FillInstanceInto(Register dst) { + lw(dst, liftoff::GetInstanceOperand()); } void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, @@ -880,7 +880,7 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) { } void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) { - // Set context to zero. + // Set instance to zero. TurboAssembler::Move(cp, zero_reg); CallRuntimeDelayed(zone, fid); } diff --git a/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/src/wasm/baseline/mips64/liftoff-assembler-mips64.h index 669ed42db9..fd4e0d8278 100644 --- a/src/wasm/baseline/mips64/liftoff-assembler-mips64.h +++ b/src/wasm/baseline/mips64/liftoff-assembler-mips64.h @@ -15,8 +15,8 @@ namespace wasm { namespace liftoff { -// fp-8 holds the stack marker, fp-16 is the wasm context, first stack slot -// is located at fp-24. +// fp-8 holds the stack marker, fp-16 is the instance parameter, first stack +// slot is located at fp-24. constexpr int32_t kConstantStackSpace = 16; constexpr int32_t kFirstStackSlotOffset = kConstantStackSpace + LiftoffAssembler::kStackSlotSize; @@ -26,7 +26,7 @@ inline MemOperand GetStackSlot(uint32_t index) { return MemOperand(fp, -kFirstStackSlotOffset - offset); } -inline MemOperand GetContextOperand() { return MemOperand(fp, -16); } +inline MemOperand GetInstanceOperand() { return MemOperand(fp, -16); } // Use this register to store the address of the last argument pushed on the // stack for a call to C. 
This register must be callee saved according to the c @@ -120,10 +120,10 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, } } -void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset, - int size) { +void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset, + int size) { DCHECK_LE(offset, kMaxInt); - ld(dst, liftoff::GetContextOperand()); + ld(dst, liftoff::GetInstanceOperand()); DCHECK(size == 4 || size == 8); if (size == 4) { lw(dst, MemOperand(dst, offset)); @@ -132,12 +132,12 @@ void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset, } } -void LiftoffAssembler::SpillContext(Register context) { - sd(context, liftoff::GetContextOperand()); +void LiftoffAssembler::SpillInstance(Register instance) { + sd(instance, liftoff::GetInstanceOperand()); } -void LiftoffAssembler::FillContextInto(Register dst) { - ld(dst, liftoff::GetContextOperand()); +void LiftoffAssembler::FillInstanceInto(Register dst) { + ld(dst, liftoff::GetInstanceOperand()); } void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, @@ -707,7 +707,7 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) { } void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) { - // Set context to zero. + // Set instance to zero. TurboAssembler::Move(cp, zero_reg); CallRuntimeDelayed(zone, fid); } diff --git a/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/src/wasm/baseline/ppc/liftoff-assembler-ppc.h index 1657753afa..9effacaff6 100644 --- a/src/wasm/baseline/ppc/liftoff-assembler-ppc.h +++ b/src/wasm/baseline/ppc/liftoff-assembler-ppc.h @@ -28,17 +28,17 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, BAILOUT("LoadConstant"); } -void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset, - int size) { - BAILOUT("LoadFromContext"); +void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset, + int size) { + BAILOUT("LoadFromInstance"); } -void LiftoffAssembler::SpillContext(Register context) { - BAILOUT("SpillContext"); +void LiftoffAssembler::SpillInstance(Register instance) { + BAILOUT("SpillInstance"); } -void LiftoffAssembler::FillContextInto(Register dst) { - BAILOUT("FillContextInto"); +void LiftoffAssembler::FillInstanceInto(Register dst) { + BAILOUT("FillInstanceInto"); } void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, diff --git a/src/wasm/baseline/s390/liftoff-assembler-s390.h b/src/wasm/baseline/s390/liftoff-assembler-s390.h index 56f78cb4f2..1f210517ec 100644 --- a/src/wasm/baseline/s390/liftoff-assembler-s390.h +++ b/src/wasm/baseline/s390/liftoff-assembler-s390.h @@ -28,17 +28,17 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, BAILOUT("LoadConstant"); } -void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset, - int size) { - BAILOUT("LoadFromContext"); +void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset, + int size) { + BAILOUT("LoadFromInstance"); } -void LiftoffAssembler::SpillContext(Register context) { - BAILOUT("SpillContext"); +void LiftoffAssembler::SpillInstance(Register instance) { + BAILOUT("SpillInstance"); } -void LiftoffAssembler::FillContextInto(Register dst) { - BAILOUT("FillContextInto"); +void LiftoffAssembler::FillInstanceInto(Register dst) { + BAILOUT("FillInstanceInto"); } void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, diff --git a/src/wasm/baseline/x64/liftoff-assembler-x64.h b/src/wasm/baseline/x64/liftoff-assembler-x64.h index 8d6f1370be..a70f92c8f7 
100644 --- a/src/wasm/baseline/x64/liftoff-assembler-x64.h +++ b/src/wasm/baseline/x64/liftoff-assembler-x64.h @@ -16,8 +16,8 @@ namespace wasm { namespace liftoff { -// rbp-8 holds the stack marker, rbp-16 is the wasm context, first stack slot -// is located at rbp-24. +// rbp-8 holds the stack marker, rbp-16 is the instance parameter, first stack +// slot is located at rbp-24. constexpr int32_t kConstantStackSpace = 16; constexpr int32_t kFirstStackSlotOffset = kConstantStackSpace + LiftoffAssembler::kStackSlotSize; @@ -28,7 +28,7 @@ inline Operand GetStackSlot(uint32_t index) { } // TODO(clemensh): Make this a constexpr variable once Operand is constexpr. -inline Operand GetContextOperand() { return Operand(rbp, -16); } +inline Operand GetInstanceOperand() { return Operand(rbp, -16); } // Use this register to store the address of the last argument pushed on the // stack for a call to C. This register must be callee saved according to the c @@ -131,10 +131,10 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, } } -void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset, - int size) { +void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset, + int size) { DCHECK_LE(offset, kMaxInt); - movp(dst, liftoff::GetContextOperand()); + movp(dst, liftoff::GetInstanceOperand()); DCHECK(size == 4 || size == 8); if (size == 4) { movl(dst, Operand(dst, offset)); @@ -143,12 +143,12 @@ void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset, } } -void LiftoffAssembler::SpillContext(Register context) { - movp(liftoff::GetContextOperand(), context); +void LiftoffAssembler::SpillInstance(Register instance) { + movp(liftoff::GetInstanceOperand(), instance); } -void LiftoffAssembler::FillContextInto(Register dst) { - movp(dst, liftoff::GetContextOperand()); +void LiftoffAssembler::FillInstanceInto(Register dst) { + movp(dst, liftoff::GetInstanceOperand()); } void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, @@ -972,7 +972,7 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) { } void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) { - // Set context to zero. + // Set instance to zero. xorp(rsi, rsi); CallRuntimeDelayed(zone, fid); } diff --git a/src/wasm/function-body-decoder-impl.h b/src/wasm/function-body-decoder-impl.h index 3d333fbcf2..3c941d4dde 100644 --- a/src/wasm/function-body-decoder-impl.h +++ b/src/wasm/function-body-decoder-impl.h @@ -774,7 +774,7 @@ class WasmDecoder : public Decoder { case kExprGrowMemory: case kExprCallFunction: case kExprCallIndirect: - // Add context cache nodes to the assigned set. + // Add instance cache nodes to the assigned set. // TODO(titzer): make this more clear. 
assigned->Add(locals_count - 1); length = OpcodeLength(decoder, pc); diff --git a/src/wasm/function-body-decoder.cc b/src/wasm/function-body-decoder.cc index b25b0701f4..0f9bec1fd0 100644 --- a/src/wasm/function-body-decoder.cc +++ b/src/wasm/function-body-decoder.cc @@ -37,7 +37,7 @@ struct SsaEnv { State state; TFNode* control; TFNode* effect; - compiler::WasmContextCacheNodes context_cache; + compiler::WasmInstanceCacheNodes instance_cache; TFNode** locals; bool go() { return state >= kReached; } @@ -46,7 +46,7 @@ struct SsaEnv { locals = nullptr; control = nullptr; effect = nullptr; - context_cache = {}; + instance_cache = {}; } void SetNotMerged() { if (state == kMerged) state = kReached; @@ -100,14 +100,14 @@ class WasmGraphBuildingInterface { : nullptr; // The first '+ 1' is needed by TF Start node, the second '+ 1' is for the - // wasm_context parameter. + // instance parameter. TFNode* start = builder_->Start( static_cast(decoder->sig_->parameter_count() + 1 + 1)); - // Initialize the wasm_context (the paramater at index 0). - builder_->set_wasm_context( - builder_->Param(compiler::kWasmContextParameterIndex)); + // Initialize the instance parameter (index 0). + builder_->set_instance_node( + builder_->Param(compiler::kWasmInstanceParameterIndex)); // Initialize local variables. Parameters are shifted by 1 because of the - // the wasm_context. + // the instance parameter. uint32_t index = 0; for (; index < decoder->sig_->parameter_count(); ++index) { ssa_env->locals[index] = builder_->Param(index + 1); @@ -129,11 +129,10 @@ class WasmGraphBuildingInterface { SetEnv(ssa_env); } - // Reload the wasm context variables from the WasmContext structure attached - // to the memory object into the Ssa Environment. + // Reload the instance cache entries into the Ssa Environment. void LoadContextIntoSsa(SsaEnv* ssa_env) { if (!ssa_env || !ssa_env->go()) return; - builder_->InitContextCache(&ssa_env->context_cache); + builder_->InitInstanceCache(&ssa_env->instance_cache); } void StartFunctionBody(Decoder* decoder, Control* block) { @@ -366,7 +365,7 @@ class WasmGraphBuildingInterface { void GrowMemory(Decoder* decoder, const Value& value, Value* result) { result->node = BUILD(GrowMemory, value.node); - // Always reload the context cache after growing memory. + // Always reload the instance cache after growing memory. LoadContextIntoSsa(ssa_env_); } @@ -549,10 +548,10 @@ class WasmGraphBuildingInterface { } #endif ssa_env_ = env; - // TODO(wasm): combine the control and effect pointers with context cache. + // TODO(wasm): combine the control and effect pointers with instance cache. builder_->set_control_ptr(&env->control); builder_->set_effect_ptr(&env->effect); - builder_->set_context_cache(&env->context_cache); + builder_->set_instance_cache(&env->instance_cache); } TFNode* CheckForException(Decoder* decoder, TFNode* node) { @@ -638,7 +637,7 @@ class WasmGraphBuildingInterface { to->locals = from->locals; to->control = from->control; to->effect = from->effect; - to->context_cache = from->context_cache; + to->instance_cache = from->instance_cache; break; } case SsaEnv::kReached: { // Create a new merge. @@ -662,9 +661,9 @@ class WasmGraphBuildingInterface { builder_->Phi(decoder->GetLocalType(i), 2, vals, merge); } } - // Start a new merge from the context cache. - builder_->NewContextCacheMerge(&to->context_cache, &from->context_cache, - merge); + // Start a new merge from the instance cache. 
+ builder_->NewInstanceCacheMerge(&to->instance_cache, + &from->instance_cache, merge); break; } case SsaEnv::kMerged: { @@ -679,9 +678,9 @@ class WasmGraphBuildingInterface { to->locals[i] = builder_->CreateOrMergeIntoPhi( decoder->GetLocalType(i), merge, to->locals[i], from->locals[i]); } - // Merge the context caches. - builder_->MergeContextCacheInto(&to->context_cache, - &from->context_cache, merge); + // Merge the instance caches. + builder_->MergeInstanceCacheInto(&to->instance_cache, + &from->instance_cache, merge); break; } default: @@ -697,21 +696,22 @@ class WasmGraphBuildingInterface { env->control = builder_->Loop(env->control); env->effect = builder_->EffectPhi(1, &env->effect, env->control); builder_->Terminate(env->effect, env->control); - // The '+ 1' here is to be able to set the context cache as assigned. + // The '+ 1' here is to be able to set the instance cache as assigned. BitVector* assigned = WasmDecoder::AnalyzeLoopAssignment( decoder, decoder->pc(), decoder->total_locals() + 1, decoder->zone()); if (decoder->failed()) return env; if (assigned != nullptr) { // Only introduce phis for variables assigned in this loop. - int context_cache_index = decoder->total_locals(); + int instance_cache_index = decoder->total_locals(); for (int i = decoder->NumLocals() - 1; i >= 0; i--) { if (!assigned->Contains(i)) continue; env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1, &env->locals[i], env->control); } - // Introduce phis for context cache pointers if necessary. - if (assigned->Contains(context_cache_index)) { - builder_->PrepareContextCacheForLoop(&env->context_cache, env->control); + // Introduce phis for instance cache pointers if necessary. + if (assigned->Contains(instance_cache_index)) { + builder_->PrepareInstanceCacheForLoop(&env->instance_cache, + env->control); } SsaEnv* loop_body_env = Split(decoder, env); @@ -726,8 +726,8 @@ class WasmGraphBuildingInterface { &env->locals[i], env->control); } - // Conservatively introduce phis for context cache. - builder_->PrepareContextCacheForLoop(&env->context_cache, env->control); + // Conservatively introduce phis for instance cache. + builder_->PrepareInstanceCacheForLoop(&env->instance_cache, env->control); SsaEnv* loop_body_env = Split(decoder, env); builder_->StackCheck(decoder->position(), &loop_body_env->effect, @@ -750,11 +750,11 @@ class WasmGraphBuildingInterface { size > 0 ? 
reinterpret_cast(decoder->zone()->New(size)) : nullptr; memcpy(result->locals, from->locals, size); - result->context_cache = from->context_cache; + result->instance_cache = from->instance_cache; } else { result->state = SsaEnv::kUnreachable; result->locals = nullptr; - result->context_cache = {}; + result->instance_cache = {}; } return result; @@ -770,7 +770,7 @@ class WasmGraphBuildingInterface { result->locals = from->locals; result->control = from->control; result->effect = from->effect; - result->context_cache = from->context_cache; + result->instance_cache = from->instance_cache; from->Kill(SsaEnv::kUnreachable); return result; } @@ -782,7 +782,7 @@ class WasmGraphBuildingInterface { result->control = nullptr; result->effect = nullptr; result->locals = nullptr; - result->context_cache = {}; + result->instance_cache = {}; return result; } diff --git a/src/wasm/module-compiler.cc b/src/wasm/module-compiler.cc index d3653c84fa..67d1303a54 100644 --- a/src/wasm/module-compiler.cc +++ b/src/wasm/module-compiler.cc @@ -56,8 +56,6 @@ namespace v8 { namespace internal { namespace wasm { -static constexpr int kInvalidSigIndex = -1; - enum class CompilationEvent : uint8_t { kFinishedBaselineCompilation, kFailedCompilation @@ -182,12 +180,6 @@ namespace { class JSToWasmWrapperCache { public: - void SetContextAddress(Address context_address) { - // Prevent to have different context addresses in the cache. - DCHECK(code_cache_.empty()); - context_address_ = context_address; - } - Handle CloneOrCompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module, wasm::WasmCode* wasm_code, @@ -206,7 +198,7 @@ class JSToWasmWrapperCache { } Handle code = compiler::CompileJSToWasmWrapper( - isolate, module, wasm_code, index, context_address_, use_trap_handler); + isolate, module, weak_instance_, wasm_code, index, use_trap_handler); uint32_t new_cache_idx = sig_map_.FindOrInsert(func->sig); DCHECK_EQ(code_cache_.size(), new_cache_idx); USE(new_cache_idx); @@ -214,11 +206,15 @@ class JSToWasmWrapperCache { return code; } + void SetWeakInstance(Handle weak_instance) { + weak_instance_ = weak_instance; + } + private: // sig_map_ maps signatures to an index in code_cache_. wasm::SignatureMap sig_map_; std::vector> code_cache_; - Address context_address_ = nullptr; + Handle weak_instance_; }; // A helper class to simplify instantiating a module from a compiled module. @@ -241,7 +237,7 @@ class InstanceBuilder { struct TableInstance { Handle table_object; // WebAssembly.Table instance Handle js_wrappers; // JSFunctions exported - Handle function_table; // internal array of pairs + size_t table_size; }; // A pre-evaluated value to use in import binding. @@ -308,15 +304,12 @@ class InstanceBuilder { uint32_t EvalUint32InitExpr(const WasmInitExpr& expr); // Load data segments into the memory. - void LoadDataSegments(WasmContext* wasm_context); + void LoadDataSegments(Handle instance); void WriteGlobalValue(WasmGlobal& global, Handle value); void SanitizeImports(); - Handle SetupWasmToJSImportsTable( - Handle instance); - // Process the imports, including functions, tables, globals, and memory, in // order, loading them from the {ffi_} object. Returns the number of imported // functions. 
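Returning to the function-body-decoder.cc hunks above: the renamed instance cache holds SSA values (for example the cached memory start and size), and at each control-flow merge an entry is either left alone, when both predecessors agree, or routed through a phi. A toy model of the MergeInstanceCacheInto step, with invented node types:

#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Node {
  std::string op;
  std::vector<Node*> inputs;
};

struct InstanceCache { Node* mem_start; Node* mem_size; };

std::vector<std::unique_ptr<Node>> arena;
Node* NewNode(std::string op, std::vector<Node*> in = {}) {
  arena.push_back(std::make_unique<Node>(Node{std::move(op), std::move(in)}));
  return arena.back().get();
}

// Identical entries pass through; differing entries get a phi at the merge.
Node* MergeValue(Node* to, Node* from, Node* merge) {
  if (to == from) return to;
  return NewNode("Phi", {to, from, merge});
}

void MergeInstanceCacheInto(InstanceCache* to, InstanceCache* from,
                            Node* merge) {
  to->mem_start = MergeValue(to->mem_start, from->mem_start, merge);
  to->mem_size = MergeValue(to->mem_size, from->mem_size, merge);
}

int main() {
  Node* merge = NewNode("Merge");
  Node* start = NewNode("mem_start");
  InstanceCache a{start, NewNode("mem_size")};
  InstanceCache b{start, NewNode("mem_size'")};  // e.g. after grow_memory
  MergeInstanceCacheInto(&a, &b, merge);
  std::cout << a.mem_start->op << " " << a.mem_size->op << "\n";
  // prints: mem_start Phi
}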
@@ -367,16 +360,6 @@ class SetOfNativeModuleModificationScopes final { std::unordered_set native_modules_; }; -void EnsureWasmContextTable(WasmContext* wasm_context, int table_size) { - if (wasm_context->table) return; - wasm_context->table_size = table_size; - wasm_context->table = reinterpret_cast( - calloc(table_size, sizeof(IndirectFunctionTableEntry))); - for (int i = 0; i < table_size; i++) { - wasm_context->table[i].sig_id = kInvalidSigIndex; - } -} - } // namespace MaybeHandle InstantiateToInstanceObject( @@ -395,61 +378,74 @@ Address CompileLazy(Isolate* isolate) { HistogramTimerScope lazy_time_scope( isolate->counters()->wasm_lazy_compilation_time()); - // Find the wasm frame which triggered the lazy compile, to get the wasm - // instance. + //========================================================================== + // Begin stack walk. + //========================================================================== StackFrameIterator it(isolate); + + //========================================================================== // First frame: C entry stub. + //========================================================================== DCHECK(!it.done()); DCHECK_EQ(StackFrame::EXIT, it.frame()->type()); it.Advance(); + + //========================================================================== // Second frame: WasmCompileLazy builtin. + //========================================================================== DCHECK(!it.done()); - Handle instance; - Maybe func_index_to_compile = Nothing(); - Handle exp_deopt_data_entry; - const wasm::WasmCode* lazy_stub_or_copy = + Handle target_instance; + int target_func_index = -1; + bool indirectly_called = false; + const wasm::WasmCode* lazy_stub = isolate->wasm_engine()->code_manager()->LookupCode(it.frame()->pc()); - DCHECK_EQ(wasm::WasmCode::kLazyStub, lazy_stub_or_copy->kind()); - if (!lazy_stub_or_copy->IsAnonymous()) { - // Then it's an indirect call or via JS->wasm wrapper. - instance = handle(lazy_stub_or_copy->native_module() - ->compiled_module() - ->owning_instance(), - isolate); - func_index_to_compile = Just(lazy_stub_or_copy->index()); - exp_deopt_data_entry = - handle(instance->compiled_module()->lazy_compile_data()->get( - static_cast(lazy_stub_or_copy->index())), + CHECK_EQ(wasm::WasmCode::kLazyStub, lazy_stub->kind()); + if (!lazy_stub->IsAnonymous()) { + // If the lazy stub is not "anonymous", then its copy encodes the target + // function index. Used for import and indirect calls. + target_instance = + handle(lazy_stub->native_module()->compiled_module()->owning_instance(), isolate); + target_func_index = lazy_stub->index(); + indirectly_called = true; } it.Advance(); + + //========================================================================== // Third frame: The calling wasm code (direct or indirect), or js-to-wasm // wrapper. + //========================================================================== DCHECK(!it.done()); DCHECK(it.frame()->is_js_to_wasm() || it.frame()->is_wasm_compiled()); Handle js_to_wasm_caller_code; const WasmCode* wasm_caller_code = nullptr; - Maybe offset = Nothing(); + int32_t caller_ret_offset = -1; if (it.frame()->is_js_to_wasm()) { - DCHECK(!instance.is_null()); + DCHECK(!target_instance.is_null()); js_to_wasm_caller_code = handle(it.frame()->LookupCode(), isolate); + // This wasn't actually an indirect call, but a JS->wasm call. 
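The stack walk in this rewritten CompileLazy distinguishes three frames: the C entry stub, the lazy-compile stub (whose per-function clone encodes the target index for import and indirect calls), and the calling code, which may be a js-to-wasm wrapper. A condensed model of just that classification logic, with invented frame types:

#include <iostream>
#include <optional>
#include <vector>

enum class FrameType { kExit, kWasmCompileLazy, kJsToWasm, kWasmCompiled };

struct Frame {
  FrameType type;
  std::optional<int> encoded_func_index;  // set on specialized lazy stubs
};

struct LazyCompileSite {
  int target_func_index = -1;  // -1: recover it from the caller's call site
  bool indirectly_called = false;
  bool caller_is_js_to_wasm = false;
};

LazyCompileSite Classify(const std::vector<Frame>& stack) {
  LazyCompileSite site;
  // stack[0]: C entry stub, stack[1]: lazy-compile stub, stack[2]: caller.
  if (stack[1].encoded_func_index) {
    site.target_func_index = *stack[1].encoded_func_index;
    site.indirectly_called = true;  // import or indirect call
  }
  if (stack[2].type == FrameType::kJsToWasm) {
    site.caller_is_js_to_wasm = true;
    site.indirectly_called = false;  // a JS->wasm call, not a wasm indirect call
  }
  return site;
}

int main() {
  std::vector<Frame> stack = {{FrameType::kExit, {}},
                              {FrameType::kWasmCompileLazy, 5},
                              {FrameType::kWasmCompiled, {}}};
  LazyCompileSite site = Classify(stack);
  std::cout << site.target_func_index << " " << site.indirectly_called << "\n";
  // prints: 5 1
}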
+ indirectly_called = false; } else { wasm_caller_code = isolate->wasm_engine()->code_manager()->LookupCode(it.frame()->pc()); - offset = Just(static_cast( - it.frame()->pc() - wasm_caller_code->instructions().start())); - if (instance.is_null()) { - // Then this is a direct call (otherwise we would have attached the - // instance via deopt data to the lazy compile stub). Just use the - // instance of the caller. - instance = handle(wasm_caller_code->native_module() - ->compiled_module() - ->owning_instance(), - isolate); + auto offset = it.frame()->pc() - wasm_caller_code->instructions().start(); + caller_ret_offset = static_cast(offset); + DCHECK_EQ(offset, caller_ret_offset); + if (target_instance.is_null()) { + // This is a direct call within the same instance, so the target + // instance is the same as the calling instance. + target_instance = handle(wasm_caller_code->native_module() + ->compiled_module() + ->owning_instance(), + isolate); } } - Handle compiled_module(instance->compiled_module()); + //========================================================================== + // Begin compilation. + //========================================================================== + Handle compiled_module( + target_instance->compiled_module()); wasm::LazyCompilationOrchestrator* orchestrator = Managed::cast( @@ -461,52 +457,49 @@ Address CompileLazy(Isolate* isolate) { compiled_module->GetNativeModule()); const wasm::WasmCode* result = nullptr; - // The caller may be js to wasm calling a function - // also available for indirect calls. + if (!js_to_wasm_caller_code.is_null()) { result = orchestrator->CompileFromJsToWasm( - isolate, instance, js_to_wasm_caller_code, - func_index_to_compile.ToChecked()); + isolate, target_instance, js_to_wasm_caller_code, target_func_index); + DCHECK_NOT_NULL(result); + DCHECK_EQ(target_func_index, result->index()); } else { DCHECK_NOT_NULL(wasm_caller_code); - if (func_index_to_compile.IsNothing() || - (!exp_deopt_data_entry.is_null() && - !exp_deopt_data_entry->IsFixedArray())) { + if (target_func_index < 0) { result = orchestrator->CompileDirectCall( - isolate, instance, func_index_to_compile, wasm_caller_code, - offset.ToChecked()); + isolate, target_instance, wasm_caller_code, caller_ret_offset); + DCHECK_NOT_NULL(result); } else { - result = orchestrator->CompileIndirectCall( - isolate, instance, func_index_to_compile.ToChecked()); + result = orchestrator->CompileIndirectCall(isolate, target_instance, + target_func_index); + DCHECK_NOT_NULL(result); } } - DCHECK_NOT_NULL(result); - int func_index = static_cast(result->index()); - if (!exp_deopt_data_entry.is_null() && exp_deopt_data_entry->IsFixedArray()) { - Handle exp_deopt_data = - Handle::cast(exp_deopt_data_entry); - - TRACE_LAZY("Patching %d position(s) in function tables.\n", - exp_deopt_data->length() / 2); - - // See EnsureExportedLazyDeoptData: exp_deopt_data[0...(len-1)] are pairs - // of followed by undefined values. Use this - // information here to patch all export tables. - Address target = result->instructions().start(); - for (int idx = 0, end = exp_deopt_data->length(); idx < end; idx += 2) { - if (exp_deopt_data->get(idx)->IsUndefined(isolate)) break; - DisallowHeapAllocation no_gc; - int exp_index = Smi::ToInt(exp_deopt_data->get(idx + 1)); - - // TODO(titzer): patching of function tables for lazy compilation - // only works for a single instance. 
- instance->wasm_context()->get()->table[exp_index].target = target; + //========================================================================== + // Update import and indirect function tables in the caller. + //========================================================================== + if (indirectly_called) { + DCHECK_NOT_NULL(wasm_caller_code); + Handle caller_instance( + WasmInstanceObject::GetOwningInstance(wasm_caller_code), isolate); + WasmModule* module = caller_instance->compiled_module()->shared()->module(); + Address old_target = lazy_stub->instructions().start(); + // TODO(wasm): this is O(n^2), since we scan the entire IFT and imports + // for every lazy compile. Introduce limited scanning. + for (unsigned i = 0; i < module->num_imported_functions; i++) { + auto entry = caller_instance->imported_function_entry_at(i); + if (entry.target() == old_target) { + entry.set(target_instance, result); + } + } + for (unsigned i = 0; i < caller_instance->indirect_function_table_size(); + i++) { + auto entry = caller_instance->indirect_function_table_entry_at(i); + if (entry.target() == old_target) { + entry.set(entry.sig_id(), target_instance, result); + } } - // After processing, remove the list of exported entries, such that we don't - // do the patching redundantly. - compiled_module->lazy_compile_data()->set( - func_index, isolate->heap()->undefined_value()); } return result->instructions().start(); @@ -516,18 +509,23 @@ compiler::ModuleEnv CreateModuleEnvFromCompiledModule( Isolate* isolate, Handle compiled_module) { DisallowHeapAllocation no_gc; WasmModule* module = compiled_module->shared()->module(); - compiler::ModuleEnv result(module, std::vector
{}, - compiled_module->use_trap_handler()); + compiler::ModuleEnv result(module, compiled_module->use_trap_handler()); return result; } const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction( - Isolate* isolate, Handle instance, int func_index) { + Isolate* isolate, Handle compiled_module, + int func_index) { base::ElapsedTimer compilation_timer; - compilation_timer.Start(); - Handle compiled_module(instance->compiled_module(), - isolate); + wasm::WasmCode* existing_code = compiled_module->GetNativeModule()->GetCode( + static_cast(func_index)); + if (existing_code != nullptr && + existing_code->kind() == wasm::WasmCode::kFunction) { + TRACE_LAZY("Function %d already compiled.\n", func_index); + return existing_code; + } + compilation_timer.Start(); // TODO(wasm): Refactor this to only get the name if it is really needed for // tracing / debugging. std::string func_name; @@ -541,14 +539,6 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction( TRACE_LAZY("Compiling function %s, %d.\n", func_name.c_str(), func_index); - wasm::WasmCode* existing_code = compiled_module->GetNativeModule()->GetCode( - static_cast(func_index)); - if (existing_code != nullptr && - existing_code->kind() == wasm::WasmCode::kFunction) { - TRACE_LAZY("Function %d already compiled.\n", func_index); - return existing_code; - } - compiler::ModuleEnv module_env = CreateModuleEnvFromCompiledModule(isolate, compiled_module); @@ -615,49 +605,11 @@ int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator, return byte_pos; } -const WasmCode* WasmExtractWasmToWasmCallee(const WasmCodeManager* code_manager, - const WasmCode* wasm_to_wasm) { - DCHECK_EQ(WasmCode::kWasmToWasmWrapper, wasm_to_wasm->kind()); - // Find the one code target in this wrapper. - RelocIterator it(wasm_to_wasm->instructions(), wasm_to_wasm->reloc_info(), - wasm_to_wasm->constant_pool(), - RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL)); - DCHECK(!it.done()); - const WasmCode* callee = - code_manager->LookupCode(it.rinfo()->js_to_wasm_address()); -#ifdef DEBUG - it.next(); - DCHECK(it.done()); -#endif - return callee; -} - -// TODO(mtrofin): this should be a function again, when chromium:761307 -// is addressed. chromium:771171 is also related. -#define WasmPatchWasmToWasmWrapper(isolate, wasm_to_wasm, new_target) \ - do { \ - TRACE_LAZY("Patching wasm-to-wasm wrapper.\n"); \ - DCHECK_EQ(WasmCode::kWasmToWasmWrapper, wasm_to_wasm->kind()); \ - NativeModuleModificationScope scope(wasm_to_wasm->native_module()); \ - RelocIterator it(wasm_to_wasm->instructions(), wasm_to_wasm->reloc_info(), \ - wasm_to_wasm->constant_pool(), \ - RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL)); \ - DCHECK(!it.done()); \ - DCHECK_EQ(WasmCode::kLazyStub, \ - isolate->wasm_engine() \ - ->code_manager() \ - ->GetCodeFromStartAddress(it.rinfo()->js_to_wasm_address()) \ - ->kind()); \ - it.rinfo()->set_js_to_wasm_address(new_target->instructions().start()); \ - it.next(); \ - DCHECK(it.done()); \ - } while (0) - } // namespace const wasm::WasmCode* LazyCompilationOrchestrator::CompileFromJsToWasm( Isolate* isolate, Handle instance, - Handle js_to_wasm_caller, uint32_t exported_func_index) { + Handle js_to_wasm_caller, uint32_t callee_func_index) { Decoder decoder(nullptr, nullptr); Handle compiled_module(instance->compiled_module(), isolate); @@ -665,37 +617,24 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFromJsToWasm( TRACE_LAZY( "Starting lazy compilation (func %u, js_to_wasm: true, patch caller: " "true). 
\n", - exported_func_index); - CompileFunction(isolate, instance, exported_func_index); + callee_func_index); + CompileFunction(isolate, compiled_module, callee_func_index); { DisallowHeapAllocation no_gc; - int patched = 0; CodeSpaceMemoryModificationScope modification_scope(isolate->heap()); RelocIterator it(*js_to_wasm_caller, RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL)); DCHECK(!it.done()); - wasm::WasmCode* current_callee = - isolate->wasm_engine()->code_manager()->LookupCode( - it.rinfo()->js_to_wasm_address()); const wasm::WasmCode* callee_compiled = - compiled_module->GetNativeModule()->GetCode(exported_func_index); + compiled_module->GetNativeModule()->GetCode(callee_func_index); DCHECK_NOT_NULL(callee_compiled); - if (current_callee->kind() == WasmCode::kWasmToWasmWrapper) { - WasmPatchWasmToWasmWrapper(isolate, current_callee, callee_compiled); - ++patched; - } else { - DCHECK_EQ(WasmCode::kLazyStub, - isolate->wasm_engine() - ->code_manager() - ->GetCodeFromStartAddress(it.rinfo()->js_to_wasm_address()) - ->kind()); - it.rinfo()->set_js_to_wasm_address( - callee_compiled->instructions().start()); - ++patched; - } - DCHECK_LT(0, patched); - TRACE_LAZY("Patched %d location(s) in the caller.\n", patched); - USE(patched); + DCHECK_EQ(WasmCode::kLazyStub, + isolate->wasm_engine() + ->code_manager() + ->GetCodeFromStartAddress(it.rinfo()->js_to_wasm_address()) + ->kind()); + it.rinfo()->set_js_to_wasm_address(callee_compiled->instructions().start()); + TRACE_LAZY("Patched 1 location in js-to-wasm %p.\n", *js_to_wasm_caller); #ifdef DEBUG it.next(); @@ -704,7 +643,7 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFromJsToWasm( } wasm::WasmCode* ret = - compiled_module->GetNativeModule()->GetCode(exported_func_index); + compiled_module->GetNativeModule()->GetCode(callee_func_index); DCHECK_NOT_NULL(ret); DCHECK_EQ(wasm::WasmCode::kFunction, ret->kind()); return ret; @@ -717,17 +656,25 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileIndirectCall( "Starting lazy compilation (func %u, js_to_wasm: false, patch caller: " "false). \n", func_index); - return CompileFunction(isolate, instance, func_index); + Handle compiled_module(instance->compiled_module(), + isolate); + return CompileFunction(isolate, compiled_module, func_index); } const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall( Isolate* isolate, Handle instance, - Maybe maybe_func_to_return_idx, const wasm::WasmCode* wasm_caller, - int call_offset) { - std::vector> non_compiled_functions; - Decoder decoder(nullptr, nullptr); - WasmCode* last_callee = nullptr; + const wasm::WasmCode* wasm_caller, int32_t caller_ret_offset) { + DCHECK_LE(0, caller_ret_offset); + Decoder decoder(nullptr, nullptr); + + // Gather all the targets of direct calls inside the code of {wasm_caller} + // and place their function indexes in {direct_callees}. + std::vector direct_callees; + // The last one before {caller_ret_offset} must be the call that triggered + // this lazy compilation. + int callee_pos = -1; + uint32_t num_non_compiled_callees = 0; // For stats. 
{ DisallowHeapAllocation no_gc; Handle caller_module( @@ -742,7 +689,6 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall( ->module() ->functions[caller_func_index] .code.offset(); - int num_non_compiled_functions = 0; for (RelocIterator it(wasm_caller->instructions(), wasm_caller->reloc_info(), wasm_caller->constant_pool(), @@ -758,72 +704,62 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall( WasmCode* callee = isolate->wasm_engine()->code_manager()->LookupCode( it.rinfo()->target_address()); - if (offset < call_offset) last_callee = callee; - if (callee->kind() != WasmCode::kLazyStub) { - non_compiled_functions.push_back(Nothing()); + if (callee->kind() == WasmCode::kLazyStub) { + // The callee has not been compiled. + ++num_non_compiled_callees; + int32_t callee_func_index = + ExtractDirectCallIndex(decoder, func_bytes + byte_pos); + DCHECK_LT(callee_func_index, + caller_module->GetNativeModule()->FunctionCount()); + // {caller_ret_offset} points to one instruction after the call. + // Remember the last called function before that offset. + if (offset < caller_ret_offset) { + callee_pos = static_cast(direct_callees.size()); + } + direct_callees.push_back(callee_func_index); + } else { + // If the callee is not the lazy compile stub, assume this callee + // has already been compiled. + direct_callees.push_back(-1); continue; } - ++num_non_compiled_functions; - - uint32_t called_func_index = - ExtractDirectCallIndex(decoder, func_bytes + byte_pos); - DCHECK_LT(called_func_index, - caller_module->GetNativeModule()->FunctionCount()); - non_compiled_functions.push_back(Just(called_func_index)); - // Call offset one instruction after the call. Remember the last called - // function before that offset. - if (offset < call_offset) { - maybe_func_to_return_idx = Just(called_func_index); - } } - TRACE_LAZY("Found %d non-compiled functions in caller.\n", - num_non_compiled_functions); - USE(num_non_compiled_functions); - } - uint32_t func_to_return_idx = 0; - - if (last_callee->kind() == WasmCode::kWasmToWasmWrapper) { - const WasmCode* actual_callee = WasmExtractWasmToWasmCallee( - isolate->wasm_engine()->code_manager(), last_callee); - func_to_return_idx = actual_callee->index(); - } else { - func_to_return_idx = maybe_func_to_return_idx.ToChecked(); + TRACE_LAZY("Found %d non-compiled callees in function=%p.\n", + num_non_compiled_callees, wasm_caller); + USE(num_non_compiled_callees); } + CHECK_LE(0, callee_pos); + // TODO(wasm): compile all functions in non_compiled_callees in + // background, wait for direct_callees[callee_pos]. + auto callee_func_index = direct_callees[callee_pos]; TRACE_LAZY( - "Starting lazy compilation (func %u @%d, js_to_wasm: false, patch " - "caller: true). \n", - func_to_return_idx, call_offset); + "Starting lazy compilation (function=%p retaddr=+%d direct_callees[%d] " + "-> %d).\n", + wasm_caller, caller_ret_offset, callee_pos, callee_func_index); - // TODO(clemensh): compile all functions in non_compiled_functions in - // background, wait for func_to_return_idx. - const WasmCode* ret = CompileFunction(isolate, instance, func_to_return_idx); + Handle compiled_module(instance->compiled_module(), + isolate); + const WasmCode* ret = + CompileFunction(isolate, compiled_module, callee_func_index); DCHECK_NOT_NULL(ret); int patched = 0; - if (last_callee->kind() == WasmCode::kWasmToWasmWrapper) { - // We can finish it all here by compiling the target wasm function and - // patching the wasm_to_wasm caller. 
- WasmPatchWasmToWasmWrapper(isolate, last_callee, ret); - ++patched; - } else { - Handle compiled_module(instance->compiled_module(), - isolate); + { DisallowHeapAllocation no_gc; - // Now patch the code object with all functions which are now compiled. This - // will pick up any other compiled functions, not only {ret}. - size_t idx = 0; + // Now patch the code in {wasm_caller} with all functions which are now + // compiled. This will pick up any other compiled functions, not only {ret}. + size_t pos = 0; for (RelocIterator it(wasm_caller->instructions(), wasm_caller->reloc_info(), wasm_caller->constant_pool(), RelocInfo::ModeMask(RelocInfo::WASM_CALL)); - !it.done(); it.next(), ++idx) { - auto& info = non_compiled_functions[idx]; - if (info.IsNothing()) continue; - uint32_t lookup = info.ToChecked(); + !it.done(); it.next(), ++pos) { + auto callee_index = direct_callees[pos]; + if (callee_index < 0) continue; // callee already compiled. const WasmCode* callee_compiled = - compiled_module->GetNativeModule()->GetCode(lookup); + compiled_module->GetNativeModule()->GetCode(callee_index); if (callee_compiled->kind() != WasmCode::kFunction) continue; DCHECK_EQ(WasmCode::kLazyStub, isolate->wasm_engine() @@ -834,11 +770,11 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall( callee_compiled->instructions().start()); ++patched; } - DCHECK_EQ(non_compiled_functions.size(), idx); + DCHECK_EQ(direct_callees.size(), pos); } DCHECK_LT(0, patched); - TRACE_LAZY("Patched %d location(s) in the caller.\n", patched); + TRACE_LAZY("Patched %d calls(s) in %p.\n", patched, wasm_caller); USE(patched); return ret; @@ -898,13 +834,12 @@ void RecordStats(const wasm::NativeModule* native_module, Counters* counters) { } } -// Ensure that the code object in at offset has -// deoptimization data attached. This is needed for lazy compile stubs which are -// called from JS_TO_WASM functions or via exported function tables. The deopt -// data is used to determine which function this lazy compile stub belongs to. -wasm::WasmCode* EnsureExportedLazyDeoptData(Isolate* isolate, - wasm::NativeModule* native_module, - uint32_t func_index) { +// Get the code for the given {func_index} in the given native module. +// If the code at that location is the (shared) lazy compile builtin, +// clone it, specializing it to the {func_index}. +wasm::WasmCode* CloneLazyCompileStubIfNeeded(Isolate* isolate, + wasm::NativeModule* native_module, + uint32_t func_index) { wasm::WasmCode* code = native_module->GetCode(func_index); // {code} will be nullptr when exporting imports. if (code == nullptr || code->kind() != wasm::WasmCode::kLazyStub || @@ -916,153 +851,40 @@ wasm::WasmCode* EnsureExportedLazyDeoptData(Isolate* isolate, WasmCode::kFlushICache); } -// Ensure that the code object in at offset has -// deoptimization data attached. This is needed for lazy compile stubs which are -// called from JS_TO_WASM functions or via exported function tables. The deopt -// data is used to determine which function this lazy compile stub belongs to. 
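The idea behind CloneLazyCompileStubIfNeeded above, sketched with invented types: rather than attaching deopt data to one shared stub, the shared anonymous lazy-compile stub is cloned and specialized with a function index wherever it can be reached from outside the module. This is a simplified model, not the V8 implementation:

#include <iostream>
#include <map>
#include <optional>

struct Stub { std::optional<int> func_index; };  // anonymous if unset

struct NativeModule {
  Stub shared_lazy_stub;             // one anonymous stub for the module
  std::map<int, Stub> cloned_stubs;  // per-function specialized clones

  // Returns a stub that encodes {func_index}, cloning the shared one on
  // first use, so CompileLazy can later recover the target from the stub.
  Stub* CloneLazyCompileStubIfNeeded(int func_index) {
    auto it = cloned_stubs.find(func_index);
    if (it != cloned_stubs.end()) return &it->second;
    Stub clone = shared_lazy_stub;
    clone.func_index = func_index;  // specialization happens here
    return &cloned_stubs.emplace(func_index, clone).first->second;
  }
};

int main() {
  NativeModule m;
  Stub* s = m.CloneLazyCompileStubIfNeeded(11);
  std::cout << *s->func_index << "\n";  // prints 11
}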
-// Ensure that the code object in <code_table> at offset <func_index> has
-// deoptimization data attached. This is needed for lazy compile stubs which are
-// called from JS_TO_WASM functions or via exported function tables. The deopt
-// data is used to determine which function this lazy compile stub belongs to.
-wasm::WasmCode* EnsureTableExportLazyDeoptData(
-    Isolate* isolate, wasm::NativeModule* native_module, uint32_t func_index,
-    Handle<FixedArray> export_table, int export_index,
-    std::unordered_map<uint32_t, uint32_t>* num_table_exports) {
-  wasm::WasmCode* code =
-      EnsureExportedLazyDeoptData(isolate, native_module, func_index);
-  if (code == nullptr || code->kind() != wasm::WasmCode::kLazyStub) return code;
-
-  // deopt_data:
-  // [#0: export table
-  //  #1: export table index]
-  // [#2: export table
-  //  #3: export table index]
-  // ...
-  // num_table_exports counts down and determines the index for the new
-  // export table entry.
-  auto table_export_entry = num_table_exports->find(func_index);
-  DCHECK(table_export_entry != num_table_exports->end());
-  DCHECK_LT(0, table_export_entry->second);
-  --table_export_entry->second;
-  uint32_t this_idx = 2 * table_export_entry->second;
-  int int_func_index = static_cast<int>(func_index);
-  Object* deopt_entry =
-      native_module->compiled_module()->lazy_compile_data()->get(
-          int_func_index);
-  FixedArray* deopt_data = nullptr;
-  if (!deopt_entry->IsFixedArray()) {
-    // we count indices down, so we enter here first for the
-    // largest index.
-    deopt_data = *isolate->factory()->NewFixedArray(this_idx + 2, TENURED);
-    native_module->compiled_module()->lazy_compile_data()->set(int_func_index,
-                                                               deopt_data);
-  } else {
-    deopt_data = FixedArray::cast(deopt_entry);
-    DCHECK_LE(this_idx + 2, deopt_data->length());
-  }
-  DCHECK(deopt_data->get(this_idx)->IsUndefined(isolate));
-  DCHECK(deopt_data->get(this_idx + 1)->IsUndefined(isolate));
-  deopt_data->set(this_idx, *export_table);
-  deopt_data->set(this_idx + 1, Smi::FromInt(export_index));
-  return code;
-}
-
-bool in_bounds(uint32_t offset, uint32_t size, uint32_t upper) {
+bool in_bounds(uint32_t offset, size_t size, size_t upper) {
   return offset + size <= upper && offset + size >= offset;
 }

 using WasmInstanceMap =
     IdentityMap<Handle<WasmInstanceObject>, FreeStoreAllocationPolicy>;

-wasm::WasmCode* MakeWasmToWasmWrapper(
-    Isolate* isolate, Handle<WasmExportedFunction> imported_function,
-    FunctionSig* expected_sig, FunctionSig** sig,
-    WasmInstanceMap* imported_instances, Handle<WasmInstanceObject> instance,
-    uint32_t index) {
-  // TODO(wasm): cache WASM-to-WASM wrappers by signature and clone+patch.
-  Handle<WasmInstanceObject> imported_instance(imported_function->instance(),
-                                               isolate);
-  imported_instances->Set(imported_instance, imported_instance);
-  WasmContext* new_wasm_context = imported_instance->wasm_context()->get();
-  Address new_wasm_context_address =
-      reinterpret_cast<Address>(new_wasm_context);
-  *sig = imported_instance->module()
-             ->functions[imported_function->function_index()]
-             .sig;
-  if (expected_sig && !expected_sig->Equals(*sig)) return {};
-
-  Handle<Code> code = compiler::CompileWasmToWasmWrapper(
-      isolate, imported_function->GetWasmCode(), *sig,
-      new_wasm_context_address);
-  return instance->compiled_module()->GetNativeModule()->AddCodeCopy(
-      code, wasm::WasmCode::kWasmToWasmWrapper, index);
-}
-
-wasm::WasmCode* UnwrapExportOrCompileImportWrapper(
-    Isolate* isolate, FunctionSig* sig, Handle<JSReceiver> target,
-    uint32_t import_index, ModuleOrigin origin,
-    WasmInstanceMap* imported_instances, Handle<FixedArray> js_imports_table,
-    Handle<WasmInstanceObject> instance) {
-  if (WasmExportedFunction::IsWasmExportedFunction(*target)) {
-    FunctionSig* unused = nullptr;
-    return MakeWasmToWasmWrapper(
-        isolate, Handle<WasmExportedFunction>::cast(target), sig, &unused,
-        imported_instances, instance, import_index);
-  }
-  // No wasm function or being debugged. Compile a new wrapper for the new
-  // signature.
-  Handle<Code> temp_code = compiler::CompileWasmToJSWrapper(
-      isolate, target, sig, import_index, origin,
-      instance->compiled_module()->use_trap_handler(), js_imports_table);
-  return instance->compiled_module()->GetNativeModule()->AddCodeCopy(
-      temp_code, wasm::WasmCode::kWasmToJsWrapper, import_index);
-}
-
-void FunctionTableFinalizer(const v8::WeakCallbackInfo<void>& data) {
-  GlobalHandles::Destroy(reinterpret_cast<Object**>(
-      reinterpret_cast<JSObject**>(data.GetParameter())));
+double MonotonicallyIncreasingTimeInMs() {
+  return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
+         base::Time::kMillisecondsPerSecond;
 }

 std::unique_ptr<compiler::ModuleEnv> CreateDefaultModuleEnv(
     Isolate* isolate, WasmModule* module) {
-  std::vector<GlobalHandleAddress> function_tables;
-
-  for (size_t i = module->function_tables.size(); i > 0; --i) {
-    Handle<Object> func_table =
-        isolate->global_handles()->Create(isolate->heap()->undefined_value());
-    GlobalHandles::MakeWeak(func_table.location(), func_table.location(),
-                            &FunctionTableFinalizer,
-                            v8::WeakCallbackType::kFinalizer);
-    function_tables.push_back(func_table.address());
-  }
-
   // TODO(kschimpf): Add module-specific policy handling here (see v8:7143)?
   bool use_trap_handler = trap_handler::IsTrapHandlerEnabled();
-  return base::make_unique<compiler::ModuleEnv>(module, function_tables,
-                                                use_trap_handler);
+  return base::make_unique<compiler::ModuleEnv>(module, use_trap_handler);
 }

 Handle<WasmCompiledModule> NewCompiledModule(Isolate* isolate,
                                              WasmModule* module,
                                              Handle<FixedArray> export_wrappers,
                                              compiler::ModuleEnv* env) {
-  Handle<WasmCompiledModule> compiled_module =
-      WasmCompiledModule::New(isolate, module, export_wrappers,
-                              env->function_tables, env->use_trap_handler);
+  Handle<WasmCompiledModule> compiled_module = WasmCompiledModule::New(
+      isolate, module, export_wrappers, env->use_trap_handler);
   return compiled_module;
 }

-}  // namespace
-
-namespace {
-
 size_t GetMaxUsableMemorySize(Isolate* isolate) {
   return isolate->heap()->memory_allocator()->code_range()->valid()
              ? isolate->heap()->memory_allocator()->code_range()->size()
              : isolate->heap()->code_space()->Capacity();
 }

-double MonotonicallyIncreasingTimeInMs() {
-  return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
-         base::Time::kMillisecondsPerSecond;
-}
-
 // The CompilationUnitBuilder builds compilation units and stores them in an
 // internal buffer. The buffer is moved into the working queue of the
 // CompilationState when {Commit} is called.
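The widened {in_bounds} helper above guards against unsigned wrap-around as well as plain overruns: where size_t is 32 bits, offset + size can wrap past zero and sneak under the upper bound, which the second conjunct rejects. A small self-contained illustration (plain C++, not V8 code):

    #include <cassert>
    #include <cstdint>

    // Same shape as the helper in the patch: the first conjunct catches
    // ordinary overruns, the second catches offset + size wrapping around
    // when size_t is only 32 bits wide.
    bool InBounds(uint32_t offset, size_t size, size_t upper) {
      return offset + size <= upper && offset + size >= offset;
    }

    int main() {
      assert(InBounds(8, 16, 64));               // ordinary in-range access.
      assert(!InBounds(40, 40, 64));             // plain overrun.
      assert(!InBounds(0xFFFFFFF0u, 0x20, 64));  // wraps on 32-bit size_t.
    }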
@@ -1382,12 +1204,19 @@ MaybeHandle<WasmModuleObject> CompileToModuleObjectInternal(
   NativeModule* native_module = compiled_module->GetNativeModule();
   compiled_module->set_shared(*shared);
   if (lazy_compile) {
-    Handle<FixedArray> lazy_compile_data = isolate->factory()->NewFixedArray(
-        static_cast<int>(wasm_module->functions.size()), TENURED);
-    compiled_module->set_lazy_compile_data(*lazy_compile_data);
-  }
+    if (wasm_module->is_wasm()) {
+      // Validate wasm modules for lazy compilation. Don't validate asm.js
+      // modules, they are valid by construction (otherwise a CHECK will fail
+      // during lazy compilation).
+      // TODO(clemensh): According to the spec, we can actually skip validation
+      // at module creation time, and return a function that always traps at
+      // (lazy) compilation time.
+      ValidateSequentially(isolate, wire_bytes, env.get(), thrower);
+      if (thrower->error()) return {};
+    }

-  if (!lazy_compile) {
+    native_module->SetLazyBuiltin(BUILTIN_CODE(isolate, WasmCompileLazy));
+  } else {
     size_t funcs_to_compile =
         wasm_module->functions.size() - wasm_module->num_imported_functions;
     bool compile_parallel =
@@ -1405,20 +1234,7 @@ MaybeHandle<WasmModuleObject> CompileToModuleObjectInternal(
     if (thrower->error()) return {};

     RecordStats(native_module, isolate->async_counters().get());
-  } else {
-    if (wasm_module->is_wasm()) {
-      // Validate wasm modules for lazy compilation. Don't validate asm.js
-      // modules, they are valid by construction (otherwise a CHECK will fail
-      // during lazy compilation).
-      // TODO(clemensh): According to the spec, we can actually skip validation
-      // at module creation time, and return a function that always traps at
-      // (lazy) compilation time.
-      ValidateSequentially(isolate, wire_bytes, env.get(), thrower);
-    }
-
-    native_module->SetLazyBuiltin(BUILTIN_CODE(isolate, WasmCompileLazy));
   }
-  if (thrower->error()) return {};

   // Compile JS->wasm wrappers for exported functions.
   CompileJsToWasmWrappers(isolate, compiled_module,
@@ -1591,17 +1407,17 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
   //--------------------------------------------------------------------------
   // Reuse the compiled module (if no owner), otherwise clone.
   //--------------------------------------------------------------------------
-  Handle<FixedArray> wrapper_table;
+  Handle<FixedArray> export_wrappers;
   wasm::NativeModule* native_module = nullptr;
   // Root the old instance, if any, in case later allocation causes GC,
   // to prevent the finalizer running for the old instance.
   MaybeHandle<WasmInstanceObject> old_instance;

   TRACE("Starting new module instantiation\n");
+  Handle<WasmCompiledModule> original =
+      handle(module_object_->compiled_module());
   {
-    Handle<WasmCompiledModule> original =
-        handle(module_object_->compiled_module());
-    if (original->has_weak_owning_instance()) {
+    if (original->has_instance()) {
       old_instance = handle(original->owning_instance());
       // Clone, but don't insert yet the clone in the instances chain.
      // We do that last. Since we are holding on to the old instance,
@@ -1610,19 +1426,19 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
       TRACE("Cloning from %zu\n", original->GetNativeModule()->instance_id);
       compiled_module_ = WasmCompiledModule::Clone(isolate_, original);
       native_module = compiled_module_->GetNativeModule();
-      wrapper_table = handle(compiled_module_->export_wrappers(), isolate_);
-      for (int i = 0; i < wrapper_table->length(); ++i) {
-        Handle<Code> orig_code(Code::cast(wrapper_table->get(i)), isolate_);
+      export_wrappers = handle(compiled_module_->export_wrappers(), isolate_);
+      for (int i = 0; i < export_wrappers->length(); ++i) {
+        Handle<Code> orig_code(Code::cast(export_wrappers->get(i)), isolate_);
         DCHECK_EQ(orig_code->kind(), Code::JS_TO_WASM_FUNCTION);
         Handle<Code> code = factory->CopyCode(orig_code);
-        wrapper_table->set(i, *code);
+        export_wrappers->set(i, *code);
       }
       RecordStats(native_module, counters());
-      RecordStats(wrapper_table, counters());
+      RecordStats(export_wrappers, counters());
     } else {
       // No instance owned the original compiled module.
       compiled_module_ = original;
-      wrapper_table = handle(compiled_module_->export_wrappers(), isolate_);
+      export_wrappers = handle(compiled_module_->export_wrappers(), isolate_);
       native_module = compiled_module_->GetNativeModule();
       TRACE("Reusing existing instance %zu\n",
             compiled_module_->GetNativeModule()->instance_id);
@@ -1644,11 +1460,17 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
   CodeSpecialization code_specialization(isolate_, &instantiation_zone);
   Handle<WasmInstanceObject> instance =
       WasmInstanceObject::New(isolate_, compiled_module_);
+  Handle<WeakCell> weak_instance = factory->NewWeakCell(instance);
+  Handle<WeakCell> old_weak_instance(original->weak_owning_instance(),
+                                     isolate_);
+  code_specialization.UpdateInstanceReferences(old_weak_instance,
+                                               weak_instance);
+  js_to_wasm_cache_.SetWeakInstance(weak_instance);

   //--------------------------------------------------------------------------
   // Set up the globals for the new instance.
   //--------------------------------------------------------------------------
-  WasmContext* wasm_context = instance->wasm_context()->get();
+  MaybeHandle<JSArrayBuffer> old_globals;
   uint32_t globals_size = module_->globals_size;
   if (globals_size > 0) {
     constexpr bool enable_guard_regions = false;
       thrower_->RangeError("Out of memory: wasm globals");
       return {};
     }
-    wasm_context->globals_start =
-        reinterpret_cast<byte*>(globals_->backing_store());
+    instance->set_globals_start(
+        reinterpret_cast<byte*>(globals_->backing_store()));
     instance->set_globals_buffer(*globals_);
   }
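After this change, compiled code and the runtime address a global directly as instance->globals_start() plus the global's byte offset, with no WasmContext indirection. A hedged sketch of what such an access looks like (the names and raw-pointer layout are illustrative only, not the V8 accessors):

    #include <cstdint>
    #include <cstring>

    // Reads a 32-bit wasm global out of the raw globals area, the way the
    // generated code addresses it: base pointer + per-global byte offset.
    int32_t ReadI32Global(const uint8_t* globals_start, uint32_t byte_offset) {
      int32_t value;
      std::memcpy(&value, globals_start + byte_offset, sizeof(value));
      return value;
    }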
@@ -1731,12 +1553,12 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
     WasmMemoryObject::AddInstance(isolate_, memory_object, instance);

     if (!memory_.is_null()) {
-      // Double-check the {memory} array buffer matches the context.
+      // Double-check the {memory} array buffer matches the instance.
       Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
       uint32_t mem_size = 0;
       CHECK(memory->byte_length()->ToUint32(&mem_size));
-      CHECK_EQ(wasm_context->mem_size, mem_size);
-      CHECK_EQ(wasm_context->mem_start, memory->backing_store());
+      CHECK_EQ(instance->memory_size(), mem_size);
+      CHECK_EQ(instance->memory_start(), memory->backing_store());
     }
   }

@@ -1746,11 +1568,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
   for (WasmTableInit& table_init : module_->table_inits) {
     DCHECK(table_init.table_index < table_instances_.size());
     uint32_t base = EvalUint32InitExpr(table_init.offset);
-    uint32_t table_size =
-        table_instances_[table_init.table_index].function_table->length() /
-        compiler::kFunctionTableEntrySize;
-    if (!in_bounds(base, static_cast<uint32_t>(table_init.entries.size()),
-                   table_size)) {
+    size_t table_size = table_instances_[table_init.table_index].table_size;
+    if (!in_bounds(base, table_init.entries.size(), table_size)) {
       thrower_->LinkError("table initializer is out of bounds");
       return {};
     }
@@ -1761,19 +1580,12 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
   //--------------------------------------------------------------------------
   for (WasmDataSegment& seg : module_->data_segments) {
     uint32_t base = EvalUint32InitExpr(seg.dest_addr);
-    if (!in_bounds(base, seg.source.length(), wasm_context->mem_size)) {
+    if (!in_bounds(base, seg.source.length(), instance->memory_size())) {
       thrower_->LinkError("data segment is out of bounds");
       return {};
     }
   }

-  // Set the WasmContext address in wrappers.
-  // TODO(wasm): the wasm context should only appear as a constant in wrappers;
-  // this code specialization is applied to the whole instance.
-  Address wasm_context_address = reinterpret_cast<Address>(wasm_context);
-  code_specialization.RelocateWasmContextReferences(wasm_context_address);
-  js_to_wasm_cache_.SetContextAddress(wasm_context_address);
-
   //--------------------------------------------------------------------------
   // Set up the exports object for the new instance.
   //--------------------------------------------------------------------------
@@ -1791,7 +1603,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
   // Initialize the memory by loading data segments.
   //--------------------------------------------------------------------------
   if (module_->data_segments.size() > 0) {
-    LoadDataSegments(wasm_context);
+    LoadDataSegments(instance);
   }

   // Patch all code with the relocations registered in code_specialization.
@@ -1799,7 +1611,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
   code_specialization.ApplyToWholeModule(native_module, SKIP_ICACHE_FLUSH);
   FlushICache(native_module);
-  FlushICache(wrapper_table);
+  FlushICache(export_wrappers);

   //--------------------------------------------------------------------------
   // Unpack and notify signal handler of protected instructions.
@@ -1812,14 +1624,13 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
   // Insert the compiled module into the weak list of compiled modules.
   //--------------------------------------------------------------------------
   {
-    Handle<WeakCell> link_to_owning_instance = factory->NewWeakCell(instance);
     if (!old_instance.is_null()) {
       // Publish the new instance to the instances chain.
       DisallowHeapAllocation no_gc;
       compiled_module_->InsertInChain(*module_object_);
     }
     module_object_->set_compiled_module(*compiled_module_);
-    compiled_module_->set_weak_owning_instance(*link_to_owning_instance);
+    compiled_module_->set_weak_owning_instance(*weak_instance);
     WasmInstanceObject::InstallFinalizer(isolate_, instance);
   }

@@ -1850,7 +1661,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
   if (module_->start_function_index >= 0) {
     int start_index = module_->start_function_index;
     wasm::WasmCode* start_code =
-        EnsureExportedLazyDeoptData(isolate_, native_module, start_index);
+        CloneLazyCompileStubIfNeeded(isolate_, native_module, start_index);
     FunctionSig* sig = module_->functions[start_index].sig;
     Handle<Code> wrapper_code = js_to_wasm_cache_.CloneOrCompileJSToWasmWrapper(
         isolate_, module_, start_code, start_index,
@@ -1969,7 +1780,7 @@ uint32_t InstanceBuilder::EvalUint32InitExpr(const WasmInitExpr& expr) {
 }

 // Load data segments into the memory.
-void InstanceBuilder::LoadDataSegments(WasmContext* wasm_context) {
+void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
   Handle<SeqOneByteString> module_bytes(
       compiled_module_->shared()->module_bytes(), isolate_);
   for (const WasmDataSegment& segment : module_->data_segments) {
@@ -1977,8 +1788,8 @@ void InstanceBuilder::LoadDataSegments(WasmContext* wasm_context) {
     uint32_t source_size = segment.source.length();
     // Segments of size == 0 are just nops.
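LoadDataSegments now takes the instance and reads the memory bounds straight off it. A minimal sketch of the per-segment work it performs, assuming a raw memory_start/memory_size pair like the accessors used below (hypothetical helper, not the V8 function):

    #include <cstdint>
    #include <cstring>

    // Copies one data segment into linear memory. The caller has already
    // verified in_bounds(dest_offset, source_size, memory_size).
    void CopyDataSegment(uint8_t* memory_start, uint32_t dest_offset,
                         const uint8_t* src, uint32_t source_size) {
      if (source_size == 0) return;  // segments of size 0 are just nops.
      std::memcpy(memory_start + dest_offset, src, source_size);
    }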
     if (source_size == 0) continue;
     uint32_t dest_offset = EvalUint32InitExpr(segment.dest_addr);
-    DCHECK(in_bounds(dest_offset, source_size, wasm_context->mem_size));
-    byte* dest = wasm_context->mem_start + dest_offset;
+    DCHECK(in_bounds(dest_offset, source_size, instance->memory_size()));
+    byte* dest = instance->memory_start() + dest_offset;
     const byte* src = reinterpret_cast<const byte*>(
         module_bytes->GetCharsAddress() + segment.source.offset());
     memcpy(dest, src, source_size);
@@ -2050,38 +1861,12 @@ void InstanceBuilder::SanitizeImports() {
   }
 }

-Handle<FixedArray> InstanceBuilder::SetupWasmToJSImportsTable(
-    Handle<WasmInstanceObject> instance) {
-  // The js_imports_table is set up so that index 0 has isolate->native_context
-  // and for every index, 3*index+1 has the JSReceiver, 3*index+2 has function's
-  // global proxy and 3*index+3 has function's context. Hence, the fixed array's
-  // size is 3*import_table.size+1.
-  int size = static_cast<int>(module_->import_table.size());
-  CHECK_LE(size, (kMaxInt - 1) / 3);
-  Handle<FixedArray> func_table =
-      isolate_->factory()->NewFixedArray(3 * size + 1, TENURED);
-  Handle<FixedArray> js_imports_table =
-      isolate_->global_handles()->Create(*func_table);
-  GlobalHandles::MakeWeak(
-      reinterpret_cast<Object**>(js_imports_table.location()),
-      js_imports_table.location(), &FunctionTableFinalizer,
-      v8::WeakCallbackType::kFinalizer);
-  instance->set_js_imports_table(*func_table);
-  js_imports_table->set(0, *isolate_->native_context());
-  return js_imports_table;
-}
-
 // Process the imports, including functions, tables, globals, and memory, in
 // order, loading them from the {ffi_} object. Returns the number of imported
 // functions.
 int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
-  using compiler::kFunctionTableSignatureOffset;
-  using compiler::kFunctionTableCodeOffset;
-  using compiler::kFunctionTableEntrySize;
   int num_imported_functions = 0;
   int num_imported_tables = 0;
-  Handle<FixedArray> js_imports_table = SetupWasmToJSImportsTable(instance);
-  WasmInstanceMap imported_wasm_instances(isolate_->heap());
   SetOfNativeModuleModificationScopes set_of_native_module_scopes;

   DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size());
@@ -2092,6 +1877,8 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
     Handle<String> module_name = sanitized_imports_[index].module_name;
     Handle<String> import_name = sanitized_imports_[index].import_name;
     Handle<Object> value = sanitized_imports_[index].value;
+    NativeModule* native_module =
+        instance->compiled_module()->GetNativeModule();

     switch (import.kind) {
       case kExternalFunction: {
@@ -2101,17 +1888,44 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
                           module_name, import_name);
           return -1;
         }
-        wasm::WasmCode* import_code = UnwrapExportOrCompileImportWrapper(
-            isolate_, module_->functions[import.index].sig,
-            Handle<JSReceiver>::cast(value), num_imported_functions,
-            module_->origin(), &imported_wasm_instances, js_imports_table,
-            instance);
-        if (import_code == nullptr) {
-          ReportLinkError("imported function does not match the expected type",
-                          index, module_name, import_name);
-          return -1;
+        uint32_t func_index = import.index;
+        DCHECK_EQ(num_imported_functions, func_index);
+        FunctionSig* expected_sig = module_->functions[func_index].sig;
+        if (WasmExportedFunction::IsWasmExportedFunction(*value)) {
+          // The imported function is a WASM function from another instance.
+          Handle<WasmExportedFunction> imported_function(
+              WasmExportedFunction::cast(*value), isolate_);
+          Handle<WasmInstanceObject> imported_instance(
+              imported_function->instance(), isolate_);
+          FunctionSig* imported_sig =
+              imported_instance->module()
+                  ->functions[imported_function->function_index()]
+                  .sig;
+          if (!imported_sig->Equals(expected_sig)) {
+            ReportLinkError(
+                "imported function does not match the expected type", index,
+                module_name, import_name);
+            return -1;
+          }
+          // The import reference is the instance object itself.
+          auto entry = instance->imported_function_entry_at(func_index);
+          auto wasm_code = imported_function->GetWasmCode();
+          entry.set(imported_instance, wasm_code);
+          native_module->SetCode(func_index, wasm_code);
+        } else {
+          // The imported function is a callable.
+          Handle<JSReceiver> js_receiver(JSReceiver::cast(*value), isolate_);
+          Handle<Code> wrapper_code = compiler::CompileWasmToJSWrapper(
+              isolate_, js_receiver, expected_sig, func_index,
+              module_->origin(),
+              instance->compiled_module()->use_trap_handler());
+          RecordStats(*wrapper_code, counters());
+
+          WasmCode* wasm_code = native_module->AddCodeCopy(
+              wrapper_code, wasm::WasmCode::kWasmToJsWrapper, func_index);
+          auto entry = instance->imported_function_entry_at(func_index);
+          entry.set(js_receiver, wasm_code);
         }
-        RecordStats(import_code, counters());
         num_imported_functions++;
         break;
       }
@@ -2121,19 +1935,20 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
                           module_name, import_name);
           return -1;
         }
-        WasmIndirectFunctionTable& table =
-            module_->function_tables[num_imported_tables];
-        TableInstance& table_instance = table_instances_[num_imported_tables];
+        uint32_t table_num = import.index;
+        DCHECK_EQ(table_num, num_imported_tables);
+        WasmIndirectFunctionTable& table = module_->function_tables[table_num];
+        TableInstance& table_instance = table_instances_[table_num];
         table_instance.table_object = Handle<WasmTableObject>::cast(value);
         instance->set_table_object(*table_instance.table_object);
         table_instance.js_wrappers = Handle<FixedArray>(
             table_instance.table_object->functions(), isolate_);

-        int imported_cur_size = table_instance.js_wrappers->length();
-        if (imported_cur_size < static_cast<int>(table.initial_size)) {
+        int imported_table_size = table_instance.js_wrappers->length();
+        if (imported_table_size < static_cast<int>(table.initial_size)) {
           thrower_->LinkError(
               "table import %d is smaller than initial %d, got %u", index,
-              table.initial_size, imported_cur_size);
+              table.initial_size, imported_table_size);
           return -1;
         }

@@ -2155,20 +1970,15 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
           }
         }

-        // Allocate a new dispatch table, containing <smi(sig), code> pairs.
-        CHECK_GE(kMaxInt / kFunctionTableEntrySize, imported_cur_size);
-        int table_size = kFunctionTableEntrySize * imported_cur_size;
-        table_instance.function_table =
-            isolate_->factory()->NewFixedArray(table_size);
-        for (int i = kFunctionTableSignatureOffset; i < table_size;
-             i += kFunctionTableEntrySize) {
-          table_instance.function_table->set(i, Smi::FromInt(kInvalidSigIndex));
+        // Allocate a new dispatch table.
+        if (!instance->has_indirect_function_table()) {
+          WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
+              instance, imported_table_size);
+          table_instances_[table_num].table_size = imported_table_size;
         }
-        WasmContext* wasm_context = instance->wasm_context()->get();
-        EnsureWasmContextTable(wasm_context, imported_cur_size);
         // Initialize the dispatch table with the (foreign) JS functions
         // that are already in the table.
-        for (int i = 0; i < imported_cur_size; ++i) {
+        for (int i = 0; i < imported_table_size; ++i) {
           Handle<Object> val(table_instance.js_wrappers->get(i), isolate_);
           // TODO(mtrofin): this is the same logic as WasmTableObject::Set:
           // insert in the local table a wrapper from the other module, and add
@@ -2189,12 +1999,10 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
           FunctionSig* sig = imported_instance->module()
                                  ->functions[exported_code->index()]
                                  .sig;
-          auto& entry = wasm_context->table[i];
-          entry.context = imported_instance->wasm_context()->get();
-          entry.sig_id = module_->signature_map.Find(sig);
-          entry.target = exported_code->instructions().start();
+          auto entry = instance->indirect_function_table_entry_at(i);
+          entry.set(module_->signature_map.Find(sig), imported_instance,
+                    exported_code);
         }
-
         num_imported_tables++;
         break;
       }
@@ -2283,18 +2091,6 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
     }
   }

-  if (!imported_wasm_instances.empty()) {
-    WasmInstanceMap::IteratableScope iteratable_scope(&imported_wasm_instances);
-    Handle<FixedArray> instances_array = isolate_->factory()->NewFixedArray(
-        imported_wasm_instances.size(), TENURED);
-    instance->set_directly_called_instances(*instances_array);
-    int index = 0;
-    for (auto it = iteratable_scope.begin(), end = iteratable_scope.end();
-         it != end; ++it, ++index) {
-      instances_array->set(index, ***it);
-    }
-  }
-
   return num_imported_functions;
 }

@@ -2378,8 +2174,8 @@ bool InstanceBuilder::NeedsWrappers() const {
 void InstanceBuilder::ProcessExports(
     Handle<WasmInstanceObject> instance,
     Handle<WasmCompiledModule> compiled_module) {
-  Handle<FixedArray> wrapper_table(compiled_module->export_wrappers(),
-                                   isolate_);
+  Handle<FixedArray> export_wrappers(compiled_module->export_wrappers(),
                                     isolate_);
   if (NeedsWrappers()) {
     // Fill the table to cache the exported JSFunction wrappers.
     js_wrappers_.insert(js_wrappers_.begin(), module_->functions.size(),
@@ -2458,7 +2254,7 @@ void InstanceBuilder::ProcessExports(
         if (js_function.is_null()) {
           // Wrap the exported code as a JSFunction.
           Handle<Code> export_code =
-              wrapper_table->GetValueChecked<Code>(isolate_, export_index);
+              export_wrappers->GetValueChecked<Code>(isolate_, export_index);
           MaybeHandle<String> func_name;
           if (module_->is_asm_js()) {
             // For modules arising from asm.js, honor the names section.
@@ -2558,95 +2354,31 @@ void InstanceBuilder::ProcessExports(

 void InstanceBuilder::InitializeTables(
     Handle<WasmInstanceObject> instance,
     CodeSpecialization* code_specialization) {
-  size_t function_table_count = module_->function_tables.size();
-
-  // function_table_count is 0 or 1, so we just create these objects even if not
-  // needed for native wasm.
-
-  // These go on the instance.
-  Handle<FixedArray> rooted_function_tables =
-      isolate_->factory()->NewFixedArray(static_cast<int>(function_table_count),
-                                         TENURED);
-
-  instance->set_function_tables(*rooted_function_tables);
-
-  for (size_t index = 0; index < function_table_count; ++index) {
+  size_t table_count = module_->function_tables.size();
+  for (size_t index = 0; index < table_count; ++index) {
     WasmIndirectFunctionTable& table = module_->function_tables[index];
     TableInstance& table_instance = table_instances_[index];
-    // The table holds <smi(sig), code> pairs.
-    CHECK_GE(kMaxInt / compiler::kFunctionTableEntrySize, table.initial_size);
-    int num_table_entries = static_cast<int>(table.initial_size);
-    int table_size = compiler::kFunctionTableEntrySize * num_table_entries;
-    WasmContext* wasm_context = instance->wasm_context()->get();
-    EnsureWasmContextTable(wasm_context, num_table_entries);
-
-    if (table_instance.function_table.is_null()) {
-      // Create a new dispatch table if necessary.
-      table_instance.function_table =
-          isolate_->factory()->NewFixedArray(table_size);
-      for (int i = compiler::kFunctionTableSignatureOffset; i < table_size;
-           i += compiler::kFunctionTableEntrySize) {
-        // Fill the table with invalid signature indexes so that
-        // uninitialized entries will always fail the signature check.
-        table_instance.function_table->set(i, Smi::FromInt(kInvalidSigIndex));
-      }
+    if (!instance->has_indirect_function_table()) {
+      WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
+          instance, table.initial_size);
+      table_instance.table_size = table.initial_size;
     }
-    int int_index = static_cast<int>(index);
-
-    Handle<FixedArray> global_func_table =
-        isolate_->global_handles()->Create(*table_instance.function_table);
-    // Make the handles weak. The table objects are rooted on the instance, as
-    // they belong to it. We need the global handles in order to have stable
-    // pointers to embed in the instance's specialization (wasm compiled code).
-    // The order of finalization doesn't matter, in that the instance finalizer
-    // may be called before each table's finalizer, or vice-versa.
-    // This is because values used for embedding are only interesting should we
-    // {Reset} a specialization, in which case they are interesting as values,
-    // they are not dereferenced.
-    GlobalHandles::MakeWeak(
-        reinterpret_cast<Object**>(global_func_table.location()),
-        global_func_table.location(), &FunctionTableFinalizer,
-        v8::WeakCallbackType::kFinalizer);
-
-    rooted_function_tables->set(int_index, *global_func_table);
   }
 }

 void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
-  wasm::NativeModule* native_module = compiled_module_->GetNativeModule();
+  NativeModule* native_module = compiled_module_->GetNativeModule();
   int function_table_count =
       static_cast<int>(module_->function_tables.size());
   for (int index = 0; index < function_table_count; ++index) {
     TableInstance& table_instance = table_instances_[index];

-    // Count the number of table exports for each function (needed for lazy
-    // compilation).
-    std::unordered_map<uint32_t, uint32_t> num_table_exports;
-    if (compile_lazy(module_)) {
-      for (auto& table_init : module_->table_inits) {
-        for (uint32_t func_index : table_init.entries) {
-          const wasm::WasmCode* code = native_module->GetCode(func_index);
-          // Only increase the counter for lazy compile builtins (it's not
-          // needed otherwise).
-          if (code->kind() != wasm::WasmCode::kLazyStub) {
-            DCHECK(code->kind() == wasm::WasmCode::kFunction ||
-                   code->kind() == wasm::WasmCode::kWasmToJsWrapper ||
-                   code->kind() == wasm::WasmCode::kWasmToWasmWrapper);
-            continue;
-          }
-          ++num_table_exports[func_index];
-        }
-      }
-    }
-
     // TODO(titzer): this does redundant work if there are multiple tables,
     // since initializations are not sorted by table index.
     for (auto& table_init : module_->table_inits) {
       uint32_t base = EvalUint32InitExpr(table_init.offset);
       uint32_t num_entries = static_cast<uint32_t>(table_init.entries.size());
-      DCHECK(in_bounds(base, num_entries,
-                       table_instance.function_table->length() /
-                           compiler::kFunctionTableEntrySize));
+      DCHECK(in_bounds(base, num_entries, table_instance.table_size));
       for (uint32_t i = 0; i < num_entries; ++i) {
         uint32_t func_index = table_init.entries[i];
         WasmFunction* function = &module_->functions[func_index];
@@ -2654,22 +2386,11 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {

         // Update the local dispatch table first.
         uint32_t sig_id = module_->signature_ids[function->sig_index];
-        table_instance.function_table->set(
-            compiler::FunctionTableSigOffset(table_index),
-            Smi::FromInt(sig_id));
-        wasm::WasmCode* wasm_code = EnsureTableExportLazyDeoptData(
-            isolate_, native_module, func_index, table_instance.function_table,
-            table_index, &num_table_exports);
-        Handle<Foreign> as_foreign = isolate_->factory()->NewForeign(
-            wasm_code->instructions().start(), TENURED);
-        table_instance.function_table->set(
-            compiler::FunctionTableCodeOffset(table_index), *as_foreign);
+        wasm::WasmCode* wasm_code =
+            CloneLazyCompileStubIfNeeded(isolate_, native_module, func_index);

-        WasmContext* wasm_context = instance->wasm_context()->get();
-        auto& entry = wasm_context->table[table_index];
-        entry.sig_id = sig_id;
-        entry.context = wasm_context;
-        entry.target = wasm_code->instructions().start();
+        auto entry = instance->indirect_function_table_entry_at(table_index);
+        entry.set(sig_id, instance, wasm_code);

         if (!table_instance.table_object.is_null()) {
           // Update the table object's other dispatch tables.
@@ -2706,27 +2427,18 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
           // UpdateDispatchTables() should update this instance as well.
           WasmTableObject::UpdateDispatchTables(
               isolate_, table_instance.table_object, table_index, function->sig,
-              instance, wasm_code, func_index);
+              instance, wasm_code);
         }
       }
     }

-#ifdef DEBUG
-    // Check that the count of table exports was accurate. The entries are
-    // decremented on each export, so all should be zero now.
-    for (auto e : num_table_exports) {
-      DCHECK_EQ(0, e.second);
-    }
-#endif
-
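The dispatch-table rewrite in this hunk replaces the old FixedArray of <smi(sig), code> pairs with typed per-entry storage: a signature id for the runtime type check, the instance that owns the target code, and the raw call target. A hedged sketch of that entry shape (field names and types are illustrative, not the actual WasmInstanceObject layout):

    #include <cstdint>

    // One indirect-call dispatch entry: call_indirect compares sig_id, then
    // jumps to target, passing instance as the implicit first parameter.
    struct IndirectFunctionTableEntry {
      uint32_t sig_id;        // canonicalized signature id for the type check.
      void* instance;         // owner of the target code.
      const uint8_t* target;  // entry point of the callee's compiled code.

      void set(uint32_t sig, void* inst, const uint8_t* code_start) {
        sig_id = sig;
        instance = inst;
        target = code_start;
      }
    };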
     // TODO(titzer): we add the new dispatch table at the end to avoid
     // redundant work and also because the new instance is not yet fully
     // initialized.
     if (!table_instance.table_object.is_null()) {
       // Add the new dispatch table to the WebAssembly.Table object.
       WasmTableObject::AddDispatchTable(isolate_, table_instance.table_object,
-                                        instance, index,
-                                        table_instance.function_table);
+                                        instance, index);
     }
   }
 }

@@ -3579,6 +3291,9 @@ void CompileJsToWasmWrappers(Isolate* isolate,
                              Handle<WasmCompiledModule> compiled_module,
                              Counters* counters) {
   JSToWasmWrapperCache js_to_wasm_cache;
+  Handle<WeakCell> weak_instance(compiled_module->weak_owning_instance(),
+                                 isolate);
+  js_to_wasm_cache.SetWeakInstance(weak_instance);
   int wrapper_index = 0;
   Handle<FixedArray> export_wrappers(compiled_module->export_wrappers(),
                                      isolate);
@@ -3586,7 +3301,7 @@ void CompileJsToWasmWrappers(Isolate* isolate,
   for (auto exp : compiled_module->shared()->module()->export_table) {
     if (exp.kind != kExternalFunction) continue;
     wasm::WasmCode* wasm_code =
-        EnsureExportedLazyDeoptData(isolate, native_module, exp.index);
+        CloneLazyCompileStubIfNeeded(isolate, native_module, exp.index);
     Handle<Code> wrapper_code = js_to_wasm_cache.CloneOrCompileJSToWasmWrapper(
         isolate, compiled_module->shared()->module(), wasm_code, exp.index,
         compiled_module->use_trap_handler());
@@ -3630,7 +3345,6 @@ Handle