diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc index 5be27be086..951a1e6f27 100644 --- a/src/arm/assembler-arm.cc +++ b/src/arm/assembler-arm.cc @@ -1153,6 +1153,7 @@ int Operand::InstructionsRequired(const Assembler* assembler, void Assembler::Move32BitImmediate(Register rd, const Operand& x, Condition cond) { if (UseMovImmediateLoad(x, this)) { + CpuFeatureScope scope(this, ARMv7); // UseMovImmediateLoad should return false when we need to output // relocation info, since we prefer the constant pool for values that // can be patched. @@ -1160,12 +1161,9 @@ void Assembler::Move32BitImmediate(Register rd, const Operand& x, UseScratchRegisterScope temps(this); // Re-use the destination register as a scratch if possible. Register target = rd != pc ? rd : temps.Acquire(); - if (CpuFeatures::IsSupported(ARMv7)) { - uint32_t imm32 = static_cast(x.immediate()); - CpuFeatureScope scope(this, ARMv7); - movw(target, imm32 & 0xFFFF, cond); - movt(target, imm32 >> 16, cond); - } + uint32_t imm32 = static_cast(x.immediate()); + movw(target, imm32 & 0xFFFF, cond); + movt(target, imm32 >> 16, cond); if (target.code() != rd.code()) { mov(rd, target, LeaveCC, cond); } diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h index a23f94e207..a7299dba8d 100644 --- a/src/arm/assembler-arm.h +++ b/src/arm/assembler-arm.h @@ -1549,6 +1549,9 @@ class Assembler : public AssemblerBase { UNREACHABLE(); } + // Move a 32-bit immediate into a register, potentially via the constant pool. 
+ void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al); + protected: int buffer_space() const { return reloc_info_writer.pos() - pc_; } @@ -1680,9 +1683,6 @@ class Assembler : public AssemblerBase { inline void CheckBuffer(); void GrowBuffer(); - // 32-bit immediate values - void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al); - // Instruction generation void AddrMode1(Instr instr, Register rd, Register rn, const Operand& x); // Attempt to encode operand |x| for instruction |instr| and return true on diff --git a/src/builtins/arm/builtins-arm.cc b/src/builtins/arm/builtins-arm.cc index 1127bec195..dbf2dd31ae 100644 --- a/src/builtins/arm/builtins-arm.cc +++ b/src/builtins/arm/builtins-arm.cc @@ -2294,6 +2294,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { } void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { + // The function index was put in r4 by the jump table trampoline. + // Convert to Smi for the runtime call. + __ SmiTag(r4, r4); { TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort. FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY); @@ -2308,8 +2311,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { __ stm(db_w, sp, gp_regs); __ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg); - // Pass the WASM instance as an explicit argument to WasmCompileLazy. + // Pass instance and function index as explicit arguments to the runtime + // function. __ push(kWasmInstanceRegister); + __ push(r4); // Load the correct CEntry builtin from the instance object. 
__ ldr(r2, FieldMemOperand(kWasmInstanceRegister, WasmInstanceObject::kCEntryStubOffset)); diff --git a/src/builtins/arm64/builtins-arm64.cc b/src/builtins/arm64/builtins-arm64.cc index 8f8b6985db..2329d0a54a 100644 --- a/src/builtins/arm64/builtins-arm64.cc +++ b/src/builtins/arm64/builtins-arm64.cc @@ -2746,6 +2746,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { } void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { + // The function index was put in w8 by the jump table trampoline. + // Sign extend and convert to Smi for the runtime call. + __ sxtw(x8, w8); + __ SmiTag(x8, x8); { TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort. FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY); @@ -2760,8 +2764,9 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { __ PushXRegList(gp_regs); __ PushDRegList(fp_regs); - // Pass the WASM instance as an explicit argument to WasmCompileLazy. - __ PushArgument(kWasmInstanceRegister); + // Pass instance and function index as explicit arguments to the runtime + // function. + __ Push(kWasmInstanceRegister, x8); // Load the correct CEntry builtin from the instance object. __ Ldr(x2, FieldMemOperand(kWasmInstanceRegister, WasmInstanceObject::kCEntryStubOffset)); diff --git a/src/builtins/ia32/builtins-ia32.cc b/src/builtins/ia32/builtins-ia32.cc index d7c51f55cf..a6634fb9a7 100644 --- a/src/builtins/ia32/builtins-ia32.cc +++ b/src/builtins/ia32/builtins-ia32.cc @@ -2481,6 +2481,9 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { } void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { + // The function index was put in edi by the jump table trampoline. + // Convert to Smi for the runtime call. + __ SmiTag(edi); { TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort. 
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY); @@ -2504,8 +2507,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { offset += kSimd128Size; } - // Pass the WASM instance as an explicit argument to WasmCompileLazy. + // Push the WASM instance as an explicit argument to WasmCompileLazy. __ Push(kWasmInstanceRegister); + // Push the function index as second argument. + __ Push(edi); // Load the correct CEntry builtin from the instance object. __ mov(ecx, FieldOperand(kWasmInstanceRegister, WasmInstanceObject::kCEntryStubOffset)); diff --git a/src/builtins/x64/builtins-x64.cc b/src/builtins/x64/builtins-x64.cc index 8815a6c231..20dda8e15e 100644 --- a/src/builtins/x64/builtins-x64.cc +++ b/src/builtins/x64/builtins-x64.cc @@ -2423,6 +2423,10 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { } void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { + // The function index was pushed to the stack by the caller as int32. + __ Pop(r11); + // Convert to Smi for the runtime call. + __ SmiTag(r11, r11); { TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort. FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY); @@ -2446,8 +2450,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { offset += kSimd128Size; } - // Pass the WASM instance as an explicit argument to WasmCompileLazy. + // Push the WASM instance as an explicit argument to WasmCompileLazy. __ Push(kWasmInstanceRegister); + // Push the function index as second argument. + __ Push(r11); // Load the correct CEntry builtin from the instance object. 
__ movp(rcx, FieldOperand(kWasmInstanceRegister, WasmInstanceObject::kCEntryStubOffset)); diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc index a4a14b8b5e..911b8dd39c 100644 --- a/src/ia32/assembler-ia32.cc +++ b/src/ia32/assembler-ia32.cc @@ -3216,6 +3216,12 @@ void Assembler::GrowBuffer() { *p += pc_delta; } + // Relocate js-to-wasm calls (which are encoded pc-relative). + for (RelocIterator it(desc, RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL)); + !it.done(); it.next()) { + it.rinfo()->apply(pc_delta); + } + DCHECK(!buffer_overflow()); } diff --git a/src/objects-printer.cc b/src/objects-printer.cc index 968802febf..44def8828a 100644 --- a/src/objects-printer.cc +++ b/src/objects-printer.cc @@ -1739,10 +1739,6 @@ void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) { // NOLINT os << "\n - managed_native_allocations: " << Brief(managed_native_allocations()); } - if (has_managed_indirect_patcher()) { - os << "\n - managed_indirect_patcher: " - << Brief(managed_indirect_patcher()); - } os << "\n - memory_start: " << static_cast(memory_start()); os << "\n - memory_size: " << memory_size(); os << "\n - memory_mask: " << AsHex(memory_mask()); diff --git a/src/runtime/runtime-wasm.cc b/src/runtime/runtime-wasm.cc index c4dc379642..63ecaa53ef 100644 --- a/src/runtime/runtime-wasm.cc +++ b/src/runtime/runtime-wasm.cc @@ -291,8 +291,9 @@ RUNTIME_FUNCTION(Runtime_WasmStackGuard) { RUNTIME_FUNCTION(Runtime_WasmCompileLazy) { HandleScope scope(isolate); - DCHECK_EQ(1, args.length()); + DCHECK_EQ(2, args.length()); CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0); + CONVERT_SMI_ARG_CHECKED(func_index, 1); ClearThreadInWasmScope wasm_flag(true); @@ -306,7 +307,8 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) { DCHECK_EQ(*instance, WasmCompileLazyFrame::cast(it.frame())->wasm_instance()); #endif - Address entrypoint = wasm::CompileLazy(isolate, instance); + Address entrypoint = wasm::CompileLazy( + isolate, 
instance->compiled_module()->GetNativeModule(), func_index); return reinterpret_cast(entrypoint); } diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h index c3c40d5343..f51e7d5a19 100644 --- a/src/runtime/runtime.h +++ b/src/runtime/runtime.h @@ -581,7 +581,7 @@ namespace internal { F(WasmThrow, 0, 1) \ F(WasmThrowCreate, 2, 1) \ F(WasmThrowTypeError, 0, 1) \ - F(WasmCompileLazy, 1, 1) + F(WasmCompileLazy, 2, 1) #define FOR_EACH_INTRINSIC_RETURN_PAIR(F) \ F(DebugBreakOnBytecode, 1, 2) \ diff --git a/src/wasm/jump-table-assembler.cc b/src/wasm/jump-table-assembler.cc index 2a17992546..ce18359ce7 100644 --- a/src/wasm/jump-table-assembler.cc +++ b/src/wasm/jump-table-assembler.cc @@ -4,6 +4,7 @@ #include "src/wasm/jump-table-assembler.h" +#include "src/assembler-inl.h" #include "src/macro-assembler-inl.h" namespace v8 { @@ -27,6 +28,122 @@ void JumpTableAssembler::EmitJumpTrampoline(Address target) { #endif } +// The implementation is compact enough to implement it inline here. If it gets +// much bigger, we might want to split it in a separate file per architecture. +#if V8_TARGET_ARCH_X64 +void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, + Address lazy_compile_target) { + // TODO(clemensh): Try more efficient sequences. + // Alternative 1: + // [header]: mov r10, [lazy_compile_target] + // jmp r10 + // [slot 0]: push [0] + // jmp [header] // pc-relative --> slot size: 10 bytes + // + // Alternative 2: + // [header]: lea r10, [rip - [header]] + // shr r10, 3 // compute index from offset + // push r10 + // mov r10, [lazy_compile_target] + // jmp r10 + // [slot 0]: call [header] + // ret // -> slot size: 5 bytes + + // Use a push, because mov to an extended register takes 6 bytes. 
+ pushq(Immediate(func_index)); // max 5 bytes + movq(kScratchRegister, uint64_t{lazy_compile_target}); // max 10 bytes + jmp(kScratchRegister); // 3 bytes +} + +void JumpTableAssembler::EmitJumpSlot(Address target) { + movq(kScratchRegister, static_cast(target)); + jmp(kScratchRegister); +} + +void JumpTableAssembler::NopBytes(int bytes) { + DCHECK_LE(0, bytes); + Nop(bytes); +} + +#elif V8_TARGET_ARCH_IA32 +void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, + Address lazy_compile_target) { + mov(edi, func_index); // 5 bytes + jmp(lazy_compile_target, RelocInfo::NONE); // 5 bytes +} + +void JumpTableAssembler::EmitJumpSlot(Address target) { + jmp(target, RelocInfo::NONE); +} + +void JumpTableAssembler::NopBytes(int bytes) { + DCHECK_LE(0, bytes); + Nop(bytes); +} + +#elif V8_TARGET_ARCH_ARM +void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, + Address lazy_compile_target) { + // Load function index to r4. + // This generates <= 3 instructions: ldr, const pool start, constant + Move32BitImmediate(r4, Operand(func_index)); + // Jump to {lazy_compile_target}. + int offset = + lazy_compile_target - reinterpret_cast
(pc_) - kPcLoadDelta; + DCHECK_EQ(0, offset % kInstrSize); + DCHECK(is_int26(offset)); // 26 bit imm + b(offset); // 1 instr + CheckConstPool(true, false); // force emit of const pool +} + +void JumpTableAssembler::EmitJumpSlot(Address target) { + int offset = target - reinterpret_cast
(pc_) - kPcLoadDelta; + DCHECK_EQ(0, offset % kInstrSize); + DCHECK(is_int26(offset)); // 26 bit imm + b(offset); +} + +void JumpTableAssembler::NopBytes(int bytes) { + DCHECK_LE(0, bytes); + DCHECK_EQ(0, bytes % kInstrSize); + for (; bytes > 0; bytes -= kInstrSize) { + nop(); + } +} + +#elif V8_TARGET_ARCH_ARM64 +void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, + Address lazy_compile_target) { + Mov(w8, func_index); // max. 2 instr + Jump(lazy_compile_target, RelocInfo::NONE); // 1 instr +} + +void JumpTableAssembler::EmitJumpSlot(Address target) { + Jump(target, RelocInfo::NONE); +} + +void JumpTableAssembler::NopBytes(int bytes) { + DCHECK_LE(0, bytes); + DCHECK_EQ(0, bytes % kInstructionSize); + for (; bytes > 0; bytes -= kInstructionSize) { + nop(); + } +} + +#else +void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, + Address lazy_compile_target) { + UNIMPLEMENTED(); +} + +void JumpTableAssembler::EmitJumpSlot(Address target) { UNIMPLEMENTED(); } + +void JumpTableAssembler::NopBytes(int bytes) { + DCHECK_LE(0, bytes); + UNIMPLEMENTED(); +} +#endif + } // namespace wasm } // namespace internal } // namespace v8 diff --git a/src/wasm/jump-table-assembler.h b/src/wasm/jump-table-assembler.h index 6b73623e93..118e1b1161 100644 --- a/src/wasm/jump-table-assembler.h +++ b/src/wasm/jump-table-assembler.h @@ -6,6 +6,7 @@ #define V8_WASM_JUMP_TABLE_ASSEMBLER_H_ #include "src/macro-assembler.h" +#include "src/wasm/wasm-code-manager.h" namespace v8 { namespace internal { @@ -26,8 +27,42 @@ class JumpTableAssembler : public TurboAssembler { public: JumpTableAssembler() : TurboAssembler(GetDefaultIsolateData(), nullptr, 0) {} + // Instantiate a {JumpTableAssembler} for patching. + explicit JumpTableAssembler(Address slot_addr, int size = 256) + : TurboAssembler(GetDefaultIsolateData(), + reinterpret_cast(slot_addr), size) {} + // Emit a trampoline to a possibly far away code target. 
void EmitJumpTrampoline(Address target); + +#if V8_TARGET_ARCH_X64 + static constexpr int kJumpTableSlotSize = 18; +#elif V8_TARGET_ARCH_IA32 + static constexpr int kJumpTableSlotSize = 10; +#elif V8_TARGET_ARCH_ARM + static constexpr int kJumpTableSlotSize = 4 * kInstrSize; +#elif V8_TARGET_ARCH_ARM64 + static constexpr int kJumpTableSlotSize = 3 * kInstructionSize; +#else + static constexpr int kJumpTableSlotSize = 1; +#endif + + void EmitLazyCompileJumpSlot(uint32_t func_index, + Address lazy_compile_target); + + void EmitJumpSlot(Address target); + + void NopBytes(int bytes); + + static void PatchJumpTableSlot(Address slot, Address new_target, + WasmCode::FlushICache flush_i_cache) { + JumpTableAssembler jsasm(slot); + jsasm.EmitJumpSlot(new_target); + jsasm.NopBytes(kJumpTableSlotSize - jsasm.pc_offset()); + if (flush_i_cache) { + Assembler::FlushICache(slot, kJumpTableSlotSize); + } + } }; } // namespace wasm diff --git a/src/wasm/module-compiler.cc b/src/wasm/module-compiler.cc index 319b1f62a6..4ac8773b29 100644 --- a/src/wasm/module-compiler.cc +++ b/src/wasm/module-compiler.cc @@ -376,156 +376,37 @@ MaybeHandle InstantiateToInstanceObject( return {}; } -// A helper class to prevent pathological patching behavior for indirect -// references to code which must be updated after lazy compiles. -// Utilizes a reverse mapping to prevent O(n^2) behavior. -class IndirectPatcher { - public: - void Patch(Handle caller_instance, - Handle target_instance, int func_index, - Address old_target, Address new_target) { - TRACE_LAZY( - "IndirectPatcher::Patch(caller=%p, target=%p, func_index=%i, " - "old_target=%" PRIuPTR ", new_target=%" PRIuPTR ")\n", - *caller_instance, *target_instance, func_index, old_target, new_target); - if (mapping_.size() == 0 || misses_ >= kMaxMisses) { - BuildMapping(caller_instance); - } - // Patch entries for the given function index. 
- WasmCodeManager* code_manager = - caller_instance->GetIsolate()->wasm_engine()->code_manager(); - USE(code_manager); - auto& entries = mapping_[func_index]; - int patched = 0; - for (auto index : entries) { - if (index < 0) { - // Imported function entry. - int i = -1 - index; - ImportedFunctionEntry entry(caller_instance, i); - if (entry.target() == old_target) { - DCHECK_EQ( - func_index, - code_manager->GetCodeFromStartAddress(entry.target())->index()); - entry.set_wasm_to_wasm(*target_instance, new_target); - patched++; - } - } else { - // Indirect function table entry. - int i = index; - IndirectFunctionTableEntry entry(caller_instance, i); - if (entry.target() == old_target) { - DCHECK_EQ( - func_index, - code_manager->GetCodeFromStartAddress(entry.target())->index()); - entry.set(entry.sig_id(), *target_instance, new_target); - patched++; - } - } - } - if (patched == 0) misses_++; - } - - private: - void BuildMapping(Handle caller_instance) { - mapping_.clear(); - misses_ = 0; - TRACE_LAZY("BuildMapping for (caller=%p)...\n", *caller_instance); - Isolate* isolate = caller_instance->GetIsolate(); - WasmCodeManager* code_manager = isolate->wasm_engine()->code_manager(); - uint32_t num_imported_functions = - caller_instance->module()->num_imported_functions; - // Process the imported function entries. - for (unsigned i = 0; i < num_imported_functions; i++) { - ImportedFunctionEntry entry(caller_instance, i); - WasmCode* code = code_manager->GetCodeFromStartAddress(entry.target()); - if (code->kind() != WasmCode::kLazyStub) continue; - TRACE_LAZY(" +import[%u] -> #%d (%p)\n", i, code->index(), - code->instructions().start()); - DCHECK(!entry.is_js_receiver_entry()); - WasmInstanceObject* target_instance = entry.instance(); - WasmCode* new_code = - target_instance->compiled_module()->GetNativeModule()->code( - code->index()); - if (new_code->kind() != WasmCode::kLazyStub) { - // Patch an imported function entry which is already compiled. 
- entry.set_wasm_to_wasm(target_instance, new_code->instruction_start()); - } else { - int key = code->index(); - int index = -1 - i; - mapping_[key].push_back(index); - } - } - // Process the indirect function table entries. - size_t ift_size = caller_instance->indirect_function_table_size(); - for (unsigned i = 0; i < ift_size; i++) { - IndirectFunctionTableEntry entry(caller_instance, i); - if (entry.target() == kNullAddress) continue; // null IFT entry - WasmCode* code = code_manager->GetCodeFromStartAddress(entry.target()); - if (code->kind() != WasmCode::kLazyStub) continue; - TRACE_LAZY(" +indirect[%u] -> #%d (lazy:%p)\n", i, code->index(), - code->instructions().start()); - WasmInstanceObject* target_instance = entry.instance(); - WasmCode* new_code = - target_instance->compiled_module()->GetNativeModule()->code( - code->index()); - if (new_code->kind() != WasmCode::kLazyStub) { - // Patch an indirect function table entry which is already compiled. - entry.set(entry.sig_id(), target_instance, - new_code->instruction_start()); - } else { - int key = code->index(); - int index = i; - mapping_[key].push_back(index); - } - } - } - - static constexpr int kMaxMisses = 5; // maximum misses before rebuilding - std::unordered_map> mapping_; - int misses_ = 0; -}; - -ModuleEnv CreateModuleEnvFromModuleObject( - Isolate* isolate, Handle module_object) { - WasmModule* module = module_object->module(); +ModuleEnv CreateModuleEnvFromNativeModule(NativeModule* native_module) { + WasmModule* module = native_module->module_object()->module(); wasm::UseTrapHandler use_trap_handler = - module_object->compiled_module()->GetNativeModule()->use_trap_handler() - ? kUseTrapHandler - : kNoTrapHandler; + native_module->use_trap_handler() ? 
kUseTrapHandler : kNoTrapHandler; return ModuleEnv(module, use_trap_handler, wasm::kRuntimeExceptionSupport); } -const wasm::WasmCode* LazyCompileFunction( - Isolate* isolate, Handle module_object, int func_index) { +wasm::WasmCode* LazyCompileFunction(Isolate* isolate, + NativeModule* native_module, + int func_index) { base::ElapsedTimer compilation_timer; - NativeModule* native_module = - module_object->compiled_module()->GetNativeModule(); - wasm::WasmCode* existing_code = - native_module->code(static_cast(func_index)); - if (existing_code != nullptr && - existing_code->kind() == wasm::WasmCode::kFunction) { - TRACE_LAZY("Function %d already compiled.\n", func_index); - return existing_code; - } + DCHECK(!native_module->has_code(static_cast(func_index))); compilation_timer.Start(); // TODO(wasm): Refactor this to only get the name if it is really needed for // tracing / debugging. std::string func_name; { - WasmName name = - Vector::cast(module_object->GetRawFunctionName(func_index)); + WasmName name = Vector::cast( + native_module->module_object()->GetRawFunctionName(func_index)); // Copy to std::string, because the underlying string object might move on // the heap. 
func_name.assign(name.start(), static_cast(name.length())); } - TRACE_LAZY("Compiling function %s, %d.\n", func_name.c_str(), func_index); + TRACE_LAZY("Compiling function '%s' (#%d).\n", func_name.c_str(), func_index); - ModuleEnv module_env = - CreateModuleEnvFromModuleObject(isolate, module_object); + ModuleEnv module_env = CreateModuleEnvFromNativeModule(native_module); - const uint8_t* module_start = module_object->module_bytes()->GetChars(); + const uint8_t* module_start = + native_module->module_object()->module_bytes()->GetChars(); const WasmFunction* func = &module_env.module->functions[func_index]; FunctionBody body{func->sig, func->code.offset(), @@ -572,292 +453,19 @@ const wasm::WasmCode* LazyCompileFunction( return wasm_code; } -namespace { - -int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator, - int offset) { - DCHECK(!iterator.done()); - int byte_pos; - do { - byte_pos = iterator.source_position().ScriptOffset(); - iterator.Advance(); - } while (!iterator.done() && iterator.code_offset() <= offset); - return byte_pos; -} - -const wasm::WasmCode* LazyCompileFromJsToWasm( - Isolate* isolate, Handle instance, - Handle js_to_wasm_caller, uint32_t callee_func_index) { - Decoder decoder(nullptr, nullptr); - Handle module_object(instance->module_object()); - NativeModule* native_module = instance->compiled_module()->GetNativeModule(); - - TRACE_LAZY( - "Starting lazy compilation (func %u, js_to_wasm: true, patch caller: " - "true). 
\n", - callee_func_index); - LazyCompileFunction(isolate, module_object, callee_func_index); - { - DisallowHeapAllocation no_gc; - CodeSpaceMemoryModificationScope modification_scope(isolate->heap()); - RelocIterator it(*js_to_wasm_caller, - RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL)); - DCHECK(!it.done()); - const wasm::WasmCode* callee_compiled = - native_module->code(callee_func_index); - DCHECK_NOT_NULL(callee_compiled); - DCHECK_EQ(WasmCode::kLazyStub, - isolate->wasm_engine() - ->code_manager() - ->GetCodeFromStartAddress(it.rinfo()->js_to_wasm_address()) - ->kind()); - it.rinfo()->set_js_to_wasm_address(callee_compiled->instruction_start()); - TRACE_LAZY("Patched 1 location in js-to-wasm %p.\n", *js_to_wasm_caller); - -#ifdef DEBUG - it.next(); - DCHECK(it.done()); -#endif - } - - wasm::WasmCode* ret = native_module->code(callee_func_index); - DCHECK_NOT_NULL(ret); - DCHECK_EQ(wasm::WasmCode::kFunction, ret->kind()); - return ret; -} - -const wasm::WasmCode* LazyCompileIndirectCall( - Isolate* isolate, Handle instance, - uint32_t func_index) { - TRACE_LAZY( - "Starting lazy compilation (func %u, js_to_wasm: false, patch caller: " - "false). \n", - func_index); - Handle module_object(instance->module_object()); - return LazyCompileFunction(isolate, module_object, func_index); -} - -const wasm::WasmCode* LazyCompileDirectCall(Isolate* isolate, - Handle instance, - const wasm::WasmCode* wasm_caller, - int32_t caller_ret_offset) { - DCHECK_LE(0, caller_ret_offset); - - Decoder decoder(nullptr, nullptr); - - // Gather all the targets of direct calls inside the code of {wasm_caller} - // and place their function indexes in {direct_callees}. - std::vector direct_callees; - // The last one before {caller_ret_offset} must be the call that triggered - // this lazy compilation. - int callee_pos = -1; - uint32_t num_non_compiled_callees = 0; // For stats. 
- { - DisallowHeapAllocation no_gc; - WasmModuleObject* module_object = instance->module_object(); - SeqOneByteString* module_bytes = module_object->module_bytes(); - uint32_t caller_func_index = wasm_caller->index(); - SourcePositionTableIterator source_pos_iterator( - wasm_caller->source_positions()); - - const byte* func_bytes = - module_bytes->GetChars() + - module_object->module()->functions[caller_func_index].code.offset(); - for (RelocIterator it(wasm_caller->instructions(), - wasm_caller->reloc_info(), - wasm_caller->constant_pool(), - RelocInfo::ModeMask(RelocInfo::WASM_CALL)); - !it.done(); it.next()) { - // TODO(clemensh): Introduce safe_cast which (D)CHECKS - // (depending on the bool) against limits of T and then static_casts. - size_t offset_l = it.rinfo()->pc() - wasm_caller->instruction_start(); - DCHECK_GE(kMaxInt, offset_l); - int offset = static_cast(offset_l); - int byte_pos = - AdvanceSourcePositionTableIterator(source_pos_iterator, offset); - - WasmCode* callee = isolate->wasm_engine()->code_manager()->LookupCode( - it.rinfo()->target_address()); - if (callee->kind() == WasmCode::kLazyStub) { - // The callee has not been compiled. - ++num_non_compiled_callees; - int32_t callee_func_index = - ExtractDirectCallIndex(decoder, func_bytes + byte_pos); - DCHECK_LT(callee_func_index, - wasm_caller->native_module()->num_functions()); - // {caller_ret_offset} points to one instruction after the call. - // Remember the last called function before that offset. - if (offset < caller_ret_offset) { - callee_pos = static_cast(direct_callees.size()); - } - direct_callees.push_back(callee_func_index); - } else { - // If the callee is not the lazy compile stub, assume this callee - // has already been compiled. 
- direct_callees.push_back(-1); - continue; - } - } - - TRACE_LAZY("Found %d non-compiled callees in function=%p.\n", - num_non_compiled_callees, wasm_caller); - USE(num_non_compiled_callees); - } - CHECK_LE(0, callee_pos); - - // TODO(wasm): compile all functions in non_compiled_callees in - // background, wait for direct_callees[callee_pos]. - auto callee_func_index = direct_callees[callee_pos]; - TRACE_LAZY( - "Starting lazy compilation (function=%p retaddr=+%d direct_callees[%d] " - "-> %d).\n", - wasm_caller, caller_ret_offset, callee_pos, callee_func_index); - - Handle module_object(instance->module_object()); - NativeModule* native_module = instance->compiled_module()->GetNativeModule(); - const WasmCode* ret = - LazyCompileFunction(isolate, module_object, callee_func_index); - DCHECK_NOT_NULL(ret); - - int patched = 0; - { - // Now patch the code in {wasm_caller} with all functions which are now - // compiled. This will pick up any other compiled functions, not only {ret}. - size_t pos = 0; - for (RelocIterator - it(wasm_caller->instructions(), wasm_caller->reloc_info(), - wasm_caller->constant_pool(), - RelocInfo::ModeMask(RelocInfo::WASM_CALL)); - !it.done(); it.next(), ++pos) { - auto callee_index = direct_callees[pos]; - if (callee_index < 0) continue; // callee already compiled. 
- const WasmCode* callee_compiled = native_module->code(callee_index); - if (callee_compiled->kind() != WasmCode::kFunction) continue; - DCHECK_EQ(WasmCode::kLazyStub, - isolate->wasm_engine() - ->code_manager() - ->GetCodeFromStartAddress(it.rinfo()->wasm_call_address()) - ->kind()); - it.rinfo()->set_wasm_call_address(callee_compiled->instruction_start()); - ++patched; - } - DCHECK_EQ(direct_callees.size(), pos); - } - - DCHECK_LT(0, patched); - TRACE_LAZY("Patched %d calls(s) in %p.\n", patched, wasm_caller); - USE(patched); - - return ret; -} - -} // namespace - -Address CompileLazy(Isolate* isolate, - Handle target_instance) { +Address CompileLazy(Isolate* isolate, NativeModule* native_module, + uint32_t func_index) { HistogramTimerScope lazy_time_scope( isolate->counters()->wasm_lazy_compilation_time()); - //========================================================================== - // Begin stack walk. - //========================================================================== - StackFrameIterator it(isolate); - - //========================================================================== - // First frame: C entry stub. - //========================================================================== - DCHECK(!it.done()); - DCHECK_EQ(StackFrame::EXIT, it.frame()->type()); - it.Advance(); - - //========================================================================== - // Second frame: WasmCompileLazy builtin. - //========================================================================== - DCHECK(!it.done()); - int target_func_index = -1; - bool indirectly_called = false; - const wasm::WasmCode* lazy_stub = - isolate->wasm_engine()->code_manager()->LookupCode(it.frame()->pc()); - CHECK_EQ(wasm::WasmCode::kLazyStub, lazy_stub->kind()); - if (!lazy_stub->IsAnonymous()) { - // If the lazy stub is not "anonymous", then its copy encodes the target - // function index. Used for import and indirect calls. 
- target_func_index = lazy_stub->index(); - indirectly_called = true; - } - it.Advance(); - - //========================================================================== - // Third frame: The calling wasm code (direct or indirect), or js-to-wasm - // wrapper. - //========================================================================== - DCHECK(!it.done()); - DCHECK(it.frame()->is_js_to_wasm() || it.frame()->is_wasm_compiled()); - Handle js_to_wasm_caller_code; - Handle caller_instance; - const WasmCode* wasm_caller_code = nullptr; - int32_t caller_ret_offset = -1; - if (it.frame()->is_js_to_wasm()) { - js_to_wasm_caller_code = handle(it.frame()->LookupCode(), isolate); - // This wasn't actually an indirect call, but a JS->wasm call. - indirectly_called = false; - } else { - caller_instance = - handle(WasmCompiledFrame::cast(it.frame())->wasm_instance(), isolate); - wasm_caller_code = - isolate->wasm_engine()->code_manager()->LookupCode(it.frame()->pc()); - auto offset = it.frame()->pc() - wasm_caller_code->instruction_start(); - caller_ret_offset = static_cast(offset); - DCHECK_EQ(offset, caller_ret_offset); - } - - //========================================================================== - // Begin compilation. 
- //========================================================================== - Handle compiled_module( - target_instance->compiled_module()); - - NativeModule* native_module = compiled_module->GetNativeModule(); DCHECK(!native_module->lazy_compile_frozen()); NativeModuleModificationScope native_module_modification_scope(native_module); - const wasm::WasmCode* result = nullptr; - - if (!js_to_wasm_caller_code.is_null()) { - result = LazyCompileFromJsToWasm(isolate, target_instance, - js_to_wasm_caller_code, target_func_index); - DCHECK_NOT_NULL(result); - DCHECK_EQ(target_func_index, result->index()); - } else { - DCHECK_NOT_NULL(wasm_caller_code); - if (target_func_index < 0) { - result = LazyCompileDirectCall(isolate, target_instance, wasm_caller_code, - caller_ret_offset); - DCHECK_NOT_NULL(result); - } else { - result = - LazyCompileIndirectCall(isolate, target_instance, target_func_index); - DCHECK_NOT_NULL(result); - } - } - - //========================================================================== - // Update import and indirect function tables in the caller. 
- //========================================================================== - if (indirectly_called) { - DCHECK(!caller_instance.is_null()); - if (!caller_instance->has_managed_indirect_patcher()) { - auto patcher = Managed::Allocate(isolate, 0); - caller_instance->set_managed_indirect_patcher(*patcher); - } - IndirectPatcher* patcher = Managed::cast( - caller_instance->managed_indirect_patcher()) - ->raw(); - Address old_target = lazy_stub->instruction_start(); - patcher->Patch(caller_instance, target_instance, target_func_index, - old_target, result->instruction_start()); - } + wasm::WasmCode* result = + LazyCompileFunction(isolate, native_module, func_index); + DCHECK_NOT_NULL(result); + DCHECK_EQ(func_index, result->index()); return result->instruction_start(); } @@ -879,15 +487,6 @@ void FlushICache(const wasm::NativeModule* native_module) { } } -void FlushICache(Handle functions) { - for (int i = 0, e = functions->length(); i < e; ++i) { - if (!functions->get(i)->IsCode()) continue; - Code* code = Code::cast(functions->get(i)); - Assembler::FlushICache(code->raw_instruction_start(), - code->raw_instruction_size()); - } -} - byte* raw_buffer_ptr(MaybeHandle buffer, int offset) { return static_cast(buffer.ToHandleChecked()->backing_store()) + offset; } @@ -1087,24 +686,6 @@ void FinishCompilationUnits(CompilationState* compilation_state, } } -void UpdateAllCompiledModulesWithTopTierCode( - Handle module_object) { - WasmModule* module = module_object->module(); - DCHECK_GT(module->functions.size() - module->num_imported_functions, 0); - USE(module); - - CodeSpaceMemoryModificationScope modification_scope( - module_object->GetIsolate()->heap()); - - NativeModule* native_module = - module_object->compiled_module()->GetNativeModule(); - - // Link. 
- CodeSpecialization code_specialization; - code_specialization.RelocateDirectCalls(native_module); - code_specialization.ApplyToWholeModule(native_module, module_object); -} - void CompileInParallel(Isolate* isolate, NativeModule* native_module, const ModuleWireBytes& wire_bytes, ModuleEnv* module_env, Handle module_object, @@ -1145,53 +726,6 @@ void CompileInParallel(Isolate* isolate, NativeModule* native_module, compilation_state->SetNumberOfFunctionsToCompile(functions_count); compilation_state->SetWireBytes(wire_bytes); - DeferredHandles* deferred_handles = nullptr; - Handle module_object_deferred; - if (compilation_state->compile_mode() == CompileMode::kTiering) { - // Open a deferred handle scope for the module_object, in order to allow - // for background tiering compilation. - DeferredHandleScope deferred(isolate); - module_object_deferred = handle(*module_object, isolate); - deferred_handles = deferred.Detach(); - } - compilation_state->AddCallback( - [module_object_deferred, deferred_handles]( - // Callback is called from a foreground thread. - CompilationEvent event, ErrorThrower* thrower) mutable { - switch (event) { - case CompilationEvent::kFinishedBaselineCompilation: - // Nothing to do, since we are finishing baseline compilation - // in this foreground thread. - return; - case CompilationEvent::kFinishedTopTierCompilation: - UpdateAllCompiledModulesWithTopTierCode(module_object_deferred); - // TODO(wasm): Currently compilation has to finish before the - // {deferred_handles} can be removed. We need to make sure that - // we can clean it up at a time when the native module - // should die (but currently cannot, since it's kept alive - // through the {deferred_handles} themselves). - delete deferred_handles; - deferred_handles = nullptr; - return; - case CompilationEvent::kFailedCompilation: - // If baseline compilation failed, we will reflect this without - // a callback, in this thread through {thrower}. 
- // Tier-up compilation should not fail if baseline compilation - // did not fail. - DCHECK(!module_object_deferred->compiled_module() - ->GetNativeModule() - ->compilation_state() - ->baseline_compilation_finished()); - delete deferred_handles; - deferred_handles = nullptr; - return; - case CompilationEvent::kDestroyed: - if (deferred_handles) delete deferred_handles; - return; - } - UNREACHABLE(); - }); - // 1) The main thread allocates a compilation unit for each wasm function // and stores them in the vector {compilation_units} within the // {compilation_state}. By adding units to the {compilation_state}, new @@ -1811,10 +1345,8 @@ MaybeHandle InstanceBuilder::Build() { //-------------------------------------------------------------------------- CodeSpecialization code_specialization; code_specialization.RelocateDirectCalls(native_module); - code_specialization.ApplyToWholeModule(native_module, module_object_, - SKIP_ICACHE_FLUSH); + code_specialization.ApplyToWholeModule(native_module, SKIP_ICACHE_FLUSH); FlushICache(native_module); - FlushICache(handle(module_object_->export_wrappers(), isolate_)); //-------------------------------------------------------------------------- // Insert the compiled module into the weak list of compiled modules. @@ -1855,7 +1387,6 @@ MaybeHandle InstanceBuilder::Build() { //-------------------------------------------------------------------------- if (module_->start_function_index >= 0) { int start_index = module_->start_function_index; - Handle start_function_instance = instance; Address start_call_address = static_cast(start_index) < module_->num_imported_functions ? kNullAddress @@ -1866,7 +1397,7 @@ MaybeHandle InstanceBuilder::Build() { // TODO(clemensh): Don't generate an exported function for the start // function. Use CWasmEntry instead. 
start_function_ = WasmExportedFunction::New( - isolate_, start_function_instance, MaybeHandle(), start_index, + isolate_, instance, MaybeHandle(), start_index, static_cast(sig->parameter_count()), wrapper_code); } @@ -2119,15 +1650,14 @@ int InstanceBuilder::ProcessImports(Handle instance) { int num_imported_mutable_globals = 0; DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size()); - for (int index = 0; index < static_cast(module_->import_table.size()); - ++index) { + int num_imports = static_cast(module_->import_table.size()); + NativeModule* native_module = instance->compiled_module()->GetNativeModule(); + for (int index = 0; index < num_imports; ++index) { WasmImport& import = module_->import_table[index]; Handle module_name = sanitized_imports_[index].module_name; Handle import_name = sanitized_imports_[index].import_name; Handle value = sanitized_imports_[index].value; - NativeModule* native_module = - instance->compiled_module()->GetNativeModule(); switch (import.kind) { case kExternalFunction: { @@ -2157,8 +1687,8 @@ int InstanceBuilder::ProcessImports(Handle instance) { return -1; } // The import reference is the instance object itself. - ImportedFunctionEntry entry(instance, func_index); Address imported_target = imported_function->GetWasmCallTarget(); + ImportedFunctionEntry entry(instance, func_index); entry.set_wasm_to_wasm(*imported_instance, imported_target); } else { // The imported function is a callable. @@ -3126,17 +2656,17 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep { } return; case CompilationEvent::kFinishedTopTierCompilation: - // It is only safe to schedule the UpdateToTopTierCompiledCode - // step if no foreground task is currently pending, and no - // finisher is outstanding (streaming compilation). + // It is only safe to remove the AsyncCompileJob if no + // foreground task is currently pending, and no finisher is + // outstanding (streaming compilation). 
            if (job->num_pending_foreground_tasks_ == 0 &&
                job->outstanding_finishers_.Value() == 0) {
-              job->DoSync();
+              job->isolate_->wasm_engine()->RemoveCompileJob(job);
+            } else {
+              // If a foreground task was pending or a finisher was pending,
+              // we will rely on FinishModule to remove the job.
+              job->tiering_completed_ = true;
              }
-            // If a foreground task was pending or a finsher was pending,
-            // we will rely on FinishModule to switch the step to
-            // UpdateToTopTierCompiledCode.
-            job->tiering_completed_ = true;
            return;
          case CompilationEvent::kFailedCompilation: {
            // Tier-up compilation should not fail if baseline compilation
@@ -3238,23 +2768,11 @@ class AsyncCompileJob::FinishModule : public CompileStep {
                ->compilation_state()
                ->compile_mode());
    if (job_->tiering_completed_) {
-      job_->DoSync();
+      job_->isolate_->wasm_engine()->RemoveCompileJob(job_);
    }
  }
};

-//==========================================================================
-// Step 7 (sync): Update with top tier code.
-//==========================================================================
-class AsyncCompileJob::UpdateToTopTierCompiledCode : public CompileStep {
-  void RunInForeground() override {
-    TRACE_COMPILE("(7) Update native module to use optimized code...\n");
-
-    UpdateAllCompiledModulesWithTopTierCode(job_->module_object_);
-    job_->isolate_->wasm_engine()->RemoveCompileJob(job_);
-  }
-};
-
 class AsyncCompileJob::AbortCompilation : public CompileStep {
   void RunInForeground() override {
     TRACE_COMPILE("Abort asynchronous compilation ...\n");
diff --git a/src/wasm/module-compiler.h b/src/wasm/module-compiler.h
index f9c6a1644f..c9bfa44d1c 100644
--- a/src/wasm/module-compiler.h
+++ b/src/wasm/module-compiler.h
@@ -65,15 +65,8 @@ V8_EXPORT_PRIVATE Handle