Reland "[wasm] Introduce jump table"

This is a reland of 733b7c8258.
The arm64 bug was fixed in https://crrev.com/c/1105051.

Original change's description:
> [wasm] Introduce jump table
>
> This introduces the concept of a jump table for WebAssembly, which is
> used for every direct and indirect call to any WebAssembly function.
> For lazy compilation, it will initially contain code to call the
> WasmCompileLazy builtin, where it passes the function index to be
> called.
> For non-lazy-compilation, it will contain a jump to the actual code.
> The jump table allows to easily redirect functions for lazy
> compilation, tier-up, debugging and (in the future) code aging. After
> this CL, we will not need to patch existing code any more for any of
> these operations.
>
> R=mstarzinger@chromium.org, titzer@chromium.org
>
> Bug: v8:7758
> Change-Id: I45f9983c2b06ae81bf5ce9847f4542fb48844a4f
> Reviewed-on: https://chromium-review.googlesource.com/1097075
> Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
> Reviewed-by: Ben Titzer <titzer@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#53805}

TBR=titzer@chromium.org,mstarzinger@chromium.org

Bug: v8:7758
Change-Id: I68555230c6db97e70f0b8fef784188f55ee04794
Reviewed-on: https://chromium-review.googlesource.com/1105158
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#53829}
Clemens Hammacher 2018-06-19 11:47:17 +02:00 committed by Commit Bot
parent 58339dfe39
commit 5f56641b41
26 changed files with 426 additions and 809 deletions
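
The description above explains the mechanism: every direct and indirect call is routed through a per-function jump table slot, so redirecting a function (lazy compilation, tier-up, debugging) means rewriting one slot instead of patching every call site. A minimal stand-alone sketch of that indirection, with invented names (JumpTable, LazyCompileStub) and plain C++ function pointers standing in for generated code:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

using WasmFunctionPtr = int (*)(int);

int LazyCompileStub(int) { std::puts("lazy-compile stub hit"); return 0; }
int CompiledCode(int arg) { return arg * 2; }

struct JumpTable {
  std::vector<WasmFunctionPtr> slots;  // one fixed slot per wasm function
  explicit JumpTable(std::size_t n) : slots(n, &LazyCompileStub) {}
  // Every call site dispatches through the callee's slot...
  int Call(uint32_t func_index, int arg) { return slots[func_index](arg); }
  // ...so redirecting a function rewrites exactly one slot, O(1) per function.
  void Redirect(uint32_t func_index, WasmFunctionPtr target) {
    slots[func_index] = target;
  }
};

int main() {
  JumpTable table(4);
  table.Call(0, 1);                        // lands in the lazy-compile stub
  table.Redirect(0, &CompiledCode);        // "compilation" patches the slot
  std::printf("%d\n", table.Call(0, 21));  // prints 42
}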

View File

@@ -1153,6 +1153,7 @@ int Operand::InstructionsRequired(const Assembler* assembler,
 void Assembler::Move32BitImmediate(Register rd, const Operand& x,
                                    Condition cond) {
   if (UseMovImmediateLoad(x, this)) {
+    CpuFeatureScope scope(this, ARMv7);
     // UseMovImmediateLoad should return false when we need to output
     // relocation info, since we prefer the constant pool for values that
     // can be patched.
@@ -1160,12 +1161,9 @@ void Assembler::Move32BitImmediate(Register rd, const Operand& x,
     UseScratchRegisterScope temps(this);
     // Re-use the destination register as a scratch if possible.
     Register target = rd != pc ? rd : temps.Acquire();
-    if (CpuFeatures::IsSupported(ARMv7)) {
     uint32_t imm32 = static_cast<uint32_t>(x.immediate());
-      CpuFeatureScope scope(this, ARMv7);
     movw(target, imm32 & 0xFFFF, cond);
     movt(target, imm32 >> 16, cond);
-    }
     if (target.code() != rd.code()) {
       mov(rd, target, LeaveCC, cond);
     }

View File

@@ -1549,6 +1549,9 @@ class Assembler : public AssemblerBase {
     UNREACHABLE();
   }
 
+  // Move a 32-bit immediate into a register, potentially via the constant
+  // pool.
+  void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);
 
  protected:
   int buffer_space() const { return reloc_info_writer.pos() - pc_; }
@@ -1680,9 +1683,6 @@ class Assembler : public AssemblerBase {
   inline void CheckBuffer();
   void GrowBuffer();
 
-  // 32-bit immediate values
-  void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);
-
   // Instruction generation
   void AddrMode1(Instr instr, Register rd, Register rn, const Operand& x);
   // Attempt to encode operand |x| for instruction |instr| and return true on

View File

@@ -2294,6 +2294,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
 }
 
 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+  // The function index was put in r4 by the jump table trampoline.
+  // Convert to Smi for the runtime call.
+  __ SmiTag(r4, r4);
   {
     TrapOnAbortScope trap_on_abort_scope(masm);  // Avoid calls to Abort.
     FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2308,8 +2311,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     __ stm(db_w, sp, gp_regs);
     __ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg);
 
-    // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+    // Pass instance and function index as explicit arguments to the runtime
+    // function.
     __ push(kWasmInstanceRegister);
+    __ push(r4);
     // Load the correct CEntry builtin from the instance object.
     __ ldr(r2, FieldMemOperand(kWasmInstanceRegister,
                                WasmInstanceObject::kCEntryStubOffset));

View File

@@ -2746,6 +2746,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
 }
 
 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+  // The function index was put in w8 by the jump table trampoline.
+  // Sign extend and convert to Smi for the runtime call.
+  __ sxtw(x8, w8);
+  __ SmiTag(x8, x8);
   {
     TrapOnAbortScope trap_on_abort_scope(masm);  // Avoid calls to Abort.
     FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2760,8 +2764,9 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     __ PushXRegList(gp_regs);
     __ PushDRegList(fp_regs);
 
-    // Pass the WASM instance as an explicit argument to WasmCompileLazy.
-    __ PushArgument(kWasmInstanceRegister);
+    // Pass instance and function index as explicit arguments to the runtime
+    // function.
+    __ Push(kWasmInstanceRegister, x8);
     // Load the correct CEntry builtin from the instance object.
     __ Ldr(x2, FieldMemOperand(kWasmInstanceRegister,
                                WasmInstanceObject::kCEntryStubOffset));

View File

@@ -2481,6 +2481,9 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
 }
 
 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+  // The function index was put in edi by the jump table trampoline.
+  // Convert to Smi for the runtime call.
+  __ SmiTag(edi);
   {
     TrapOnAbortScope trap_on_abort_scope(masm);  // Avoid calls to Abort.
     FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2504,8 +2507,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
       offset += kSimd128Size;
     }
 
-    // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+    // Push the WASM instance as an explicit argument to WasmCompileLazy.
     __ Push(kWasmInstanceRegister);
+    // Push the function index as second argument.
+    __ Push(edi);
     // Load the correct CEntry builtin from the instance object.
     __ mov(ecx, FieldOperand(kWasmInstanceRegister,
                              WasmInstanceObject::kCEntryStubOffset));

View File

@@ -2423,6 +2423,10 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
 }
 
 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+  // The function index was pushed to the stack by the caller as int32.
+  __ Pop(r11);
+  // Convert to Smi for the runtime call.
+  __ SmiTag(r11, r11);
   {
     TrapOnAbortScope trap_on_abort_scope(masm);  // Avoid calls to Abort.
     FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2446,8 +2450,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
       offset += kSimd128Size;
     }
 
-    // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+    // Push the WASM instance as an explicit argument to WasmCompileLazy.
    __ Push(kWasmInstanceRegister);
+    // Push the function index as second argument.
+    __ Push(r11);
     // Load the correct CEntry builtin from the instance object.
     __ movp(rcx, FieldOperand(kWasmInstanceRegister,
                               WasmInstanceObject::kCEntryStubOffset));

View File

@@ -3216,6 +3216,12 @@ void Assembler::GrowBuffer() {
     *p += pc_delta;
   }
 
+  // Relocate js-to-wasm calls (which are encoded pc-relative).
+  for (RelocIterator it(desc, RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
+       !it.done(); it.next()) {
+    it.rinfo()->apply(pc_delta);
+  }
+
   DCHECK(!buffer_overflow());
 }

View File

@@ -1739,10 +1739,6 @@ void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) {  // NOLINT
     os << "\n - managed_native_allocations: "
        << Brief(managed_native_allocations());
   }
-  if (has_managed_indirect_patcher()) {
-    os << "\n - managed_indirect_patcher: "
-       << Brief(managed_indirect_patcher());
-  }
   os << "\n - memory_start: " << static_cast<void*>(memory_start());
   os << "\n - memory_size: " << memory_size();
   os << "\n - memory_mask: " << AsHex(memory_mask());

View File

@@ -291,8 +291,9 @@ RUNTIME_FUNCTION(Runtime_WasmStackGuard) {
 RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
   HandleScope scope(isolate);
-  DCHECK_EQ(1, args.length());
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
+  CONVERT_SMI_ARG_CHECKED(func_index, 1);
 
   ClearThreadInWasmScope wasm_flag(true);
@@ -306,7 +307,8 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
   DCHECK_EQ(*instance, WasmCompileLazyFrame::cast(it.frame())->wasm_instance());
 #endif
 
-  Address entrypoint = wasm::CompileLazy(isolate, instance);
+  Address entrypoint = wasm::CompileLazy(
+      isolate, instance->compiled_module()->GetNativeModule(), func_index);
   return reinterpret_cast<Object*>(entrypoint);
 }
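
With the function index passed explicitly by the WasmCompileLazy builtin (as a Smi, see the builtins changes above), the runtime no longer walks the stack to discover what to compile. A condensed, hypothetical model of the round trip, with invented names and function pointers standing in for jump table slots and generated code:

#include <cstdint>
#include <cstdio>
#include <vector>

using Entrypoint = int (*)(int);
std::vector<Entrypoint> jump_table;  // one slot per wasm function

int CompiledSquare(int x) { return x * x; }

// Stands in for Runtime_WasmCompileLazy(instance, func_index): compile the
// function, patch its jump table slot, and return the new entry point.
Entrypoint RuntimeWasmCompileLazy(uint32_t func_index) {
  Entrypoint entry = &CompiledSquare;  // "compile" the function
  jump_table[func_index] = entry;      // patch the slot
  return entry;                        // the builtin tail-jumps here
}

// Stands in for the lazy-compile stub; the real slot passes the index in a
// register (r4/w8/edi) or on the stack (x64).
int LazyStub(int x) {
  const uint32_t func_index = 0;  // hardcoded for the sketch
  return RuntimeWasmCompileLazy(func_index)(x);
}

int main() {
  jump_table.assign(1, &LazyStub);
  std::printf("%d\n", jump_table[0](7));  // first call compiles: prints 49
  std::printf("%d\n", jump_table[0](8));  // now hits compiled code: prints 64
}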

View File

@@ -581,7 +581,7 @@ namespace internal {
   F(WasmThrow, 0, 1)          \
   F(WasmThrowCreate, 2, 1)    \
   F(WasmThrowTypeError, 0, 1) \
-  F(WasmCompileLazy, 1, 1)
+  F(WasmCompileLazy, 2, 1)
 
 #define FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
   F(DebugBreakOnBytecode, 1, 2) \

View File

@@ -4,6 +4,7 @@
 
 #include "src/wasm/jump-table-assembler.h"
 
+#include "src/assembler-inl.h"
 #include "src/macro-assembler-inl.h"
 
 namespace v8 {
@@ -27,6 +28,122 @@ void JumpTableAssembler::EmitJumpTrampoline(Address target) {
 #endif
 }
 
+// The implementation is compact enough to implement it inline here. If it gets
+// much bigger, we might want to split it in a separate file per architecture.
+#if V8_TARGET_ARCH_X64
+void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
+                                                 Address lazy_compile_target) {
+  // TODO(clemensh): Try more efficient sequences.
+  // Alternative 1:
+  // [header]:  mov r10, [lazy_compile_target]
+  //            jmp r10
+  // [slot 0]:  push [0]
+  //            jmp [header]  // pc-relative --> slot size: 10 bytes
+  //
+  // Alternative 2:
+  // [header]:  lea r10, [rip - [header]]
+  //            shr r10, 3  // compute index from offset
+  //            push r10
+  //            mov r10, [lazy_compile_target]
+  //            jmp r10
+  // [slot 0]:  call [header]
+  //            ret  // -> slot size: 5 bytes
+
+  // Use a push, because mov to an extended register takes 6 bytes.
+  pushq(Immediate(func_index));                           // max 5 bytes
+  movq(kScratchRegister, uint64_t{lazy_compile_target});  // max 10 bytes
+  jmp(kScratchRegister);                                  // 3 bytes
+}
+
+void JumpTableAssembler::EmitJumpSlot(Address target) {
+  movq(kScratchRegister, static_cast<uint64_t>(target));
+  jmp(kScratchRegister);
+}
+
+void JumpTableAssembler::NopBytes(int bytes) {
+  DCHECK_LE(0, bytes);
+  Nop(bytes);
+}
+
+#elif V8_TARGET_ARCH_IA32
+void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
+                                                 Address lazy_compile_target) {
+  mov(edi, func_index);                       // 5 bytes
+  jmp(lazy_compile_target, RelocInfo::NONE);  // 5 bytes
+}
+
+void JumpTableAssembler::EmitJumpSlot(Address target) {
+  jmp(target, RelocInfo::NONE);
+}
+
+void JumpTableAssembler::NopBytes(int bytes) {
+  DCHECK_LE(0, bytes);
+  Nop(bytes);
+}
+
+#elif V8_TARGET_ARCH_ARM
+void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
+                                                 Address lazy_compile_target) {
+  // Load function index to r4.
+  // This generates <= 3 instructions: ldr, const pool start, constant
+  Move32BitImmediate(r4, Operand(func_index));
+  // Jump to {lazy_compile_target}.
+  int offset =
+      lazy_compile_target - reinterpret_cast<Address>(pc_) - kPcLoadDelta;
+  DCHECK_EQ(0, offset % kInstrSize);
+  DCHECK(is_int26(offset));     // 26 bit imm
+  b(offset);                    // 1 instr
+  CheckConstPool(true, false);  // force emit of const pool
+}
+
+void JumpTableAssembler::EmitJumpSlot(Address target) {
+  int offset = target - reinterpret_cast<Address>(pc_) - kPcLoadDelta;
+  DCHECK_EQ(0, offset % kInstrSize);
+  DCHECK(is_int26(offset));  // 26 bit imm
+  b(offset);
+}
+
+void JumpTableAssembler::NopBytes(int bytes) {
+  DCHECK_LE(0, bytes);
+  DCHECK_EQ(0, bytes % kInstrSize);
+  for (; bytes > 0; bytes -= kInstrSize) {
+    nop();
+  }
+}
+
+#elif V8_TARGET_ARCH_ARM64
+void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
+                                                 Address lazy_compile_target) {
+  Mov(w8, func_index);                         // max. 2 instr
+  Jump(lazy_compile_target, RelocInfo::NONE);  // 1 instr
+}
+
+void JumpTableAssembler::EmitJumpSlot(Address target) {
+  Jump(target, RelocInfo::NONE);
+}
+
+void JumpTableAssembler::NopBytes(int bytes) {
+  DCHECK_LE(0, bytes);
  DCHECK_EQ(0, bytes % kInstructionSize);
+  for (; bytes > 0; bytes -= kInstructionSize) {
+    nop();
+  }
+}
+
+#else
+void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
+                                                 Address lazy_compile_target) {
+  UNIMPLEMENTED();
+}
+
+void JumpTableAssembler::EmitJumpSlot(Address target) { UNIMPLEMENTED(); }
+
+void JumpTableAssembler::NopBytes(int bytes) {
+  DCHECK_LE(0, bytes);
+  UNIMPLEMENTED();
+}
+#endif
+
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
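
On x64, the slot size of 18 bytes declared in the header below matches the worst-case encodings noted in the comments above: push imm32 (up to 5 bytes), movq of a 64-bit immediate (up to 10 bytes), and jmp through a register (3 bytes). An illustrative compile-time cross-check, not part of the CL; the constant names are invented for this sketch:

// Byte counts taken from the EmitLazyCompileJumpSlot comments above.
constexpr int kPushImm32Bytes = 5;  // pushq(Immediate(func_index)), max
constexpr int kMovImm64Bytes = 10;  // movq(kScratchRegister, imm64), max
constexpr int kJmpRegBytes = 3;     // jmp(kScratchRegister)
static_assert(kPushImm32Bytes + kMovImm64Bytes + kJmpRegBytes == 18,
              "x64 jump table slot holds the worst-case lazy-compile sequence");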

View File

@@ -6,6 +6,7 @@
 #define V8_WASM_JUMP_TABLE_ASSEMBLER_H_
 
 #include "src/macro-assembler.h"
+#include "src/wasm/wasm-code-manager.h"
 
 namespace v8 {
 namespace internal {
@@ -26,8 +27,42 @@ class JumpTableAssembler : public TurboAssembler {
  public:
   JumpTableAssembler() : TurboAssembler(GetDefaultIsolateData(), nullptr, 0) {}
 
+  // Instantiate a {JumpTableAssembler} for patching.
+  explicit JumpTableAssembler(Address slot_addr, int size = 256)
+      : TurboAssembler(GetDefaultIsolateData(),
+                       reinterpret_cast<void*>(slot_addr), size) {}
+
   // Emit a trampoline to a possibly far away code target.
   void EmitJumpTrampoline(Address target);
 
+#if V8_TARGET_ARCH_X64
+  static constexpr int kJumpTableSlotSize = 18;
+#elif V8_TARGET_ARCH_IA32
+  static constexpr int kJumpTableSlotSize = 10;
+#elif V8_TARGET_ARCH_ARM
+  static constexpr int kJumpTableSlotSize = 4 * kInstrSize;
+#elif V8_TARGET_ARCH_ARM64
+  static constexpr int kJumpTableSlotSize = 3 * kInstructionSize;
+#else
+  static constexpr int kJumpTableSlotSize = 1;
+#endif
+
+  void EmitLazyCompileJumpSlot(uint32_t func_index,
+                               Address lazy_compile_target);
+
+  void EmitJumpSlot(Address target);
+
+  void NopBytes(int bytes);
+
+  static void PatchJumpTableSlot(Address slot, Address new_target,
+                                 WasmCode::FlushICache flush_i_cache) {
+    JumpTableAssembler jsasm(slot);
+    jsasm.EmitJumpSlot(new_target);
+    jsasm.NopBytes(kJumpTableSlotSize - jsasm.pc_offset());
+    if (flush_i_cache) {
+      Assembler::FlushICache(slot, kJumpTableSlotSize);
+    }
+  }
 };
 
 }  // namespace wasm

View File

@@ -378,156 +378,37 @@ MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
   return {};
 }
 
-// A helper class to prevent pathological patching behavior for indirect
-// references to code which must be updated after lazy compiles.
-// Utilizes a reverse mapping to prevent O(n^2) behavior.
-class IndirectPatcher {
- public:
-  void Patch(Handle<WasmInstanceObject> caller_instance,
-             Handle<WasmInstanceObject> target_instance, int func_index,
-             Address old_target, Address new_target) {
-    TRACE_LAZY(
-        "IndirectPatcher::Patch(caller=%p, target=%p, func_index=%i, "
-        "old_target=%" PRIuPTR ", new_target=%" PRIuPTR ")\n",
-        *caller_instance, *target_instance, func_index, old_target, new_target);
-    if (mapping_.size() == 0 || misses_ >= kMaxMisses) {
-      BuildMapping(caller_instance);
-    }
-    // Patch entries for the given function index.
-    WasmCodeManager* code_manager =
-        caller_instance->GetIsolate()->wasm_engine()->code_manager();
-    USE(code_manager);
-    auto& entries = mapping_[func_index];
-    int patched = 0;
-    for (auto index : entries) {
-      if (index < 0) {
-        // Imported function entry.
-        int i = -1 - index;
-        ImportedFunctionEntry entry(caller_instance, i);
-        if (entry.target() == old_target) {
-          DCHECK_EQ(
-              func_index,
-              code_manager->GetCodeFromStartAddress(entry.target())->index());
-          entry.set_wasm_to_wasm(*target_instance, new_target);
-          patched++;
-        }
-      } else {
-        // Indirect function table entry.
-        int i = index;
-        IndirectFunctionTableEntry entry(caller_instance, i);
-        if (entry.target() == old_target) {
-          DCHECK_EQ(
-              func_index,
-              code_manager->GetCodeFromStartAddress(entry.target())->index());
-          entry.set(entry.sig_id(), *target_instance, new_target);
-          patched++;
-        }
-      }
-    }
-    if (patched == 0) misses_++;
-  }
-
- private:
-  void BuildMapping(Handle<WasmInstanceObject> caller_instance) {
-    mapping_.clear();
-    misses_ = 0;
-    TRACE_LAZY("BuildMapping for (caller=%p)...\n", *caller_instance);
-    Isolate* isolate = caller_instance->GetIsolate();
-    WasmCodeManager* code_manager = isolate->wasm_engine()->code_manager();
-    uint32_t num_imported_functions =
-        caller_instance->module()->num_imported_functions;
-    // Process the imported function entries.
-    for (unsigned i = 0; i < num_imported_functions; i++) {
-      ImportedFunctionEntry entry(caller_instance, i);
-      WasmCode* code = code_manager->GetCodeFromStartAddress(entry.target());
-      if (code->kind() != WasmCode::kLazyStub) continue;
-      TRACE_LAZY(" +import[%u] -> #%d (%p)\n", i, code->index(),
-                 code->instructions().start());
-      DCHECK(!entry.is_js_receiver_entry());
-      WasmInstanceObject* target_instance = entry.instance();
-      WasmCode* new_code =
-          target_instance->compiled_module()->GetNativeModule()->code(
-              code->index());
-      if (new_code->kind() != WasmCode::kLazyStub) {
-        // Patch an imported function entry which is already compiled.
-        entry.set_wasm_to_wasm(target_instance, new_code->instruction_start());
-      } else {
-        int key = code->index();
-        int index = -1 - i;
-        mapping_[key].push_back(index);
-      }
-    }
-    // Process the indirect function table entries.
-    size_t ift_size = caller_instance->indirect_function_table_size();
-    for (unsigned i = 0; i < ift_size; i++) {
-      IndirectFunctionTableEntry entry(caller_instance, i);
-      if (entry.target() == kNullAddress) continue;  // null IFT entry
-      WasmCode* code = code_manager->GetCodeFromStartAddress(entry.target());
-      if (code->kind() != WasmCode::kLazyStub) continue;
-      TRACE_LAZY(" +indirect[%u] -> #%d (lazy:%p)\n", i, code->index(),
-                 code->instructions().start());
-      WasmInstanceObject* target_instance = entry.instance();
-      WasmCode* new_code =
-          target_instance->compiled_module()->GetNativeModule()->code(
-              code->index());
-      if (new_code->kind() != WasmCode::kLazyStub) {
-        // Patch an indirect function table entry which is already compiled.
-        entry.set(entry.sig_id(), target_instance,
-                  new_code->instruction_start());
-      } else {
-        int key = code->index();
-        int index = i;
-        mapping_[key].push_back(index);
-      }
-    }
-  }
-
-  static constexpr int kMaxMisses = 5;  // maximum misses before rebuilding
-  std::unordered_map<int, std::vector<int>> mapping_;
-  int misses_ = 0;
-};
-
-ModuleEnv CreateModuleEnvFromModuleObject(
-    Isolate* isolate, Handle<WasmModuleObject> module_object) {
-  WasmModule* module = module_object->module();
+ModuleEnv CreateModuleEnvFromNativeModule(NativeModule* native_module) {
+  WasmModule* module = native_module->module_object()->module();
   wasm::UseTrapHandler use_trap_handler =
-      module_object->compiled_module()->GetNativeModule()->use_trap_handler()
-          ? kUseTrapHandler
-          : kNoTrapHandler;
+      native_module->use_trap_handler() ? kUseTrapHandler : kNoTrapHandler;
   return ModuleEnv(module, use_trap_handler, wasm::kRuntimeExceptionSupport);
 }
 
-const wasm::WasmCode* LazyCompileFunction(
-    Isolate* isolate, Handle<WasmModuleObject> module_object, int func_index) {
+wasm::WasmCode* LazyCompileFunction(Isolate* isolate,
+                                    NativeModule* native_module,
+                                    int func_index) {
   base::ElapsedTimer compilation_timer;
-  NativeModule* native_module =
-      module_object->compiled_module()->GetNativeModule();
-  wasm::WasmCode* existing_code =
-      native_module->code(static_cast<uint32_t>(func_index));
-  if (existing_code != nullptr &&
-      existing_code->kind() == wasm::WasmCode::kFunction) {
-    TRACE_LAZY("Function %d already compiled.\n", func_index);
-    return existing_code;
-  }
+  DCHECK(!native_module->has_code(static_cast<uint32_t>(func_index)));
 
   compilation_timer.Start();
   // TODO(wasm): Refactor this to only get the name if it is really needed for
   // tracing / debugging.
   std::string func_name;
   {
-    WasmName name =
-        Vector<const char>::cast(module_object->GetRawFunctionName(func_index));
+    WasmName name = Vector<const char>::cast(
+        native_module->module_object()->GetRawFunctionName(func_index));
     // Copy to std::string, because the underlying string object might move on
     // the heap.
     func_name.assign(name.start(), static_cast<size_t>(name.length()));
   }
 
-  TRACE_LAZY("Compiling function %s, %d.\n", func_name.c_str(), func_index);
+  TRACE_LAZY("Compiling function '%s' (#%d).\n", func_name.c_str(), func_index);
 
-  ModuleEnv module_env =
-      CreateModuleEnvFromModuleObject(isolate, module_object);
+  ModuleEnv module_env = CreateModuleEnvFromNativeModule(native_module);
 
-  const uint8_t* module_start = module_object->module_bytes()->GetChars();
+  const uint8_t* module_start =
+      native_module->module_object()->module_bytes()->GetChars();
 
   const WasmFunction* func = &module_env.module->functions[func_index];
   FunctionBody body{func->sig, func->code.offset(),
@@ -574,292 +455,19 @@ const wasm::WasmCode* LazyCompileFunction(
   return wasm_code;
 }
 
-namespace {
-
-int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator,
-                                       int offset) {
-  DCHECK(!iterator.done());
-  int byte_pos;
-  do {
-    byte_pos = iterator.source_position().ScriptOffset();
-    iterator.Advance();
-  } while (!iterator.done() && iterator.code_offset() <= offset);
-  return byte_pos;
-}
-
-const wasm::WasmCode* LazyCompileFromJsToWasm(
-    Isolate* isolate, Handle<WasmInstanceObject> instance,
-    Handle<Code> js_to_wasm_caller, uint32_t callee_func_index) {
-  Decoder decoder(nullptr, nullptr);
-  Handle<WasmModuleObject> module_object(instance->module_object());
-  NativeModule* native_module = instance->compiled_module()->GetNativeModule();
-
-  TRACE_LAZY(
-      "Starting lazy compilation (func %u, js_to_wasm: true, patch caller: "
-      "true). \n",
-      callee_func_index);
-  LazyCompileFunction(isolate, module_object, callee_func_index);
-  {
-    DisallowHeapAllocation no_gc;
-    CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
-    RelocIterator it(*js_to_wasm_caller,
-                     RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
-    DCHECK(!it.done());
-    const wasm::WasmCode* callee_compiled =
-        native_module->code(callee_func_index);
-    DCHECK_NOT_NULL(callee_compiled);
-    DCHECK_EQ(WasmCode::kLazyStub,
-              isolate->wasm_engine()
-                  ->code_manager()
-                  ->GetCodeFromStartAddress(it.rinfo()->js_to_wasm_address())
-                  ->kind());
-    it.rinfo()->set_js_to_wasm_address(callee_compiled->instruction_start());
-    TRACE_LAZY("Patched 1 location in js-to-wasm %p.\n", *js_to_wasm_caller);
-
-#ifdef DEBUG
-    it.next();
-    DCHECK(it.done());
-#endif
-  }
-
-  wasm::WasmCode* ret = native_module->code(callee_func_index);
-  DCHECK_NOT_NULL(ret);
-  DCHECK_EQ(wasm::WasmCode::kFunction, ret->kind());
-  return ret;
-}
-
-const wasm::WasmCode* LazyCompileIndirectCall(
-    Isolate* isolate, Handle<WasmInstanceObject> instance,
-    uint32_t func_index) {
-  TRACE_LAZY(
-      "Starting lazy compilation (func %u, js_to_wasm: false, patch caller: "
-      "false). \n",
-      func_index);
-  Handle<WasmModuleObject> module_object(instance->module_object());
-  return LazyCompileFunction(isolate, module_object, func_index);
-}
-
-const wasm::WasmCode* LazyCompileDirectCall(Isolate* isolate,
-                                            Handle<WasmInstanceObject> instance,
-                                            const wasm::WasmCode* wasm_caller,
-                                            int32_t caller_ret_offset) {
-  DCHECK_LE(0, caller_ret_offset);
-
-  Decoder decoder(nullptr, nullptr);
-
-  // Gather all the targets of direct calls inside the code of {wasm_caller}
-  // and place their function indexes in {direct_callees}.
-  std::vector<int32_t> direct_callees;
-  // The last one before {caller_ret_offset} must be the call that triggered
-  // this lazy compilation.
-  int callee_pos = -1;
-  uint32_t num_non_compiled_callees = 0;  // For stats.
-  {
-    DisallowHeapAllocation no_gc;
-    WasmModuleObject* module_object = instance->module_object();
-    SeqOneByteString* module_bytes = module_object->module_bytes();
-    uint32_t caller_func_index = wasm_caller->index();
-    SourcePositionTableIterator source_pos_iterator(
-        wasm_caller->source_positions());
-    const byte* func_bytes =
-        module_bytes->GetChars() +
-        module_object->module()->functions[caller_func_index].code.offset();
-    for (RelocIterator it(wasm_caller->instructions(),
-                          wasm_caller->reloc_info(),
-                          wasm_caller->constant_pool(),
-                          RelocInfo::ModeMask(RelocInfo::WASM_CALL));
-         !it.done(); it.next()) {
-      // TODO(clemensh): Introduce safe_cast<T, bool> which (D)CHECKS
-      // (depending on the bool) against limits of T and then static_casts.
-      size_t offset_l = it.rinfo()->pc() - wasm_caller->instruction_start();
-      DCHECK_GE(kMaxInt, offset_l);
-      int offset = static_cast<int>(offset_l);
-      int byte_pos =
-          AdvanceSourcePositionTableIterator(source_pos_iterator, offset);
-      WasmCode* callee = isolate->wasm_engine()->code_manager()->LookupCode(
-          it.rinfo()->target_address());
-      if (callee->kind() == WasmCode::kLazyStub) {
-        // The callee has not been compiled.
-        ++num_non_compiled_callees;
-        int32_t callee_func_index =
-            ExtractDirectCallIndex(decoder, func_bytes + byte_pos);
-        DCHECK_LT(callee_func_index,
-                  wasm_caller->native_module()->num_functions());
-        // {caller_ret_offset} points to one instruction after the call.
-        // Remember the last called function before that offset.
-        if (offset < caller_ret_offset) {
-          callee_pos = static_cast<int>(direct_callees.size());
-        }
-        direct_callees.push_back(callee_func_index);
-      } else {
-        // If the callee is not the lazy compile stub, assume this callee
-        // has already been compiled.
-        direct_callees.push_back(-1);
-        continue;
-      }
-    }
-
-    TRACE_LAZY("Found %d non-compiled callees in function=%p.\n",
-               num_non_compiled_callees, wasm_caller);
-    USE(num_non_compiled_callees);
-  }
-  CHECK_LE(0, callee_pos);
-
-  // TODO(wasm): compile all functions in non_compiled_callees in
-  // background, wait for direct_callees[callee_pos].
-  auto callee_func_index = direct_callees[callee_pos];
-  TRACE_LAZY(
-      "Starting lazy compilation (function=%p retaddr=+%d direct_callees[%d] "
-      "-> %d).\n",
-      wasm_caller, caller_ret_offset, callee_pos, callee_func_index);
-
-  Handle<WasmModuleObject> module_object(instance->module_object());
-  NativeModule* native_module = instance->compiled_module()->GetNativeModule();
-  const WasmCode* ret =
-      LazyCompileFunction(isolate, module_object, callee_func_index);
-  DCHECK_NOT_NULL(ret);
-
-  int patched = 0;
-  {
-    // Now patch the code in {wasm_caller} with all functions which are now
-    // compiled. This will pick up any other compiled functions, not only {ret}.
-    size_t pos = 0;
-    for (RelocIterator
-             it(wasm_caller->instructions(), wasm_caller->reloc_info(),
-                wasm_caller->constant_pool(),
-                RelocInfo::ModeMask(RelocInfo::WASM_CALL));
-         !it.done(); it.next(), ++pos) {
-      auto callee_index = direct_callees[pos];
-      if (callee_index < 0) continue;  // callee already compiled.
-      const WasmCode* callee_compiled = native_module->code(callee_index);
-      if (callee_compiled->kind() != WasmCode::kFunction) continue;
-      DCHECK_EQ(WasmCode::kLazyStub,
-                isolate->wasm_engine()
-                    ->code_manager()
-                    ->GetCodeFromStartAddress(it.rinfo()->wasm_call_address())
-                    ->kind());
-      it.rinfo()->set_wasm_call_address(callee_compiled->instruction_start());
-      ++patched;
-    }
-    DCHECK_EQ(direct_callees.size(), pos);
-  }
-
-  DCHECK_LT(0, patched);
-  TRACE_LAZY("Patched %d calls(s) in %p.\n", patched, wasm_caller);
-  USE(patched);
-
-  return ret;
-}
-
-}  // namespace
-
-Address CompileLazy(Isolate* isolate,
-                    Handle<WasmInstanceObject> target_instance) {
+Address CompileLazy(Isolate* isolate, NativeModule* native_module,
+                    uint32_t func_index) {
   HistogramTimerScope lazy_time_scope(
       isolate->counters()->wasm_lazy_compilation_time());
 
-  //==========================================================================
-  // Begin stack walk.
-  //==========================================================================
-  StackFrameIterator it(isolate);
-
-  //==========================================================================
-  // First frame: C entry stub.
-  //==========================================================================
-  DCHECK(!it.done());
-  DCHECK_EQ(StackFrame::EXIT, it.frame()->type());
-  it.Advance();
-
-  //==========================================================================
-  // Second frame: WasmCompileLazy builtin.
-  //==========================================================================
-  DCHECK(!it.done());
-  int target_func_index = -1;
-  bool indirectly_called = false;
-  const wasm::WasmCode* lazy_stub =
-      isolate->wasm_engine()->code_manager()->LookupCode(it.frame()->pc());
-  CHECK_EQ(wasm::WasmCode::kLazyStub, lazy_stub->kind());
-  if (!lazy_stub->IsAnonymous()) {
-    // If the lazy stub is not "anonymous", then its copy encodes the target
-    // function index. Used for import and indirect calls.
-    target_func_index = lazy_stub->index();
-    indirectly_called = true;
-  }
-  it.Advance();
-
-  //==========================================================================
-  // Third frame: The calling wasm code (direct or indirect), or js-to-wasm
-  // wrapper.
-  //==========================================================================
-  DCHECK(!it.done());
-  DCHECK(it.frame()->is_js_to_wasm() || it.frame()->is_wasm_compiled());
-  Handle<Code> js_to_wasm_caller_code;
-  Handle<WasmInstanceObject> caller_instance;
-  const WasmCode* wasm_caller_code = nullptr;
-  int32_t caller_ret_offset = -1;
-  if (it.frame()->is_js_to_wasm()) {
-    js_to_wasm_caller_code = handle(it.frame()->LookupCode(), isolate);
-    // This wasn't actually an indirect call, but a JS->wasm call.
-    indirectly_called = false;
-  } else {
-    caller_instance =
-        handle(WasmCompiledFrame::cast(it.frame())->wasm_instance(), isolate);
-    wasm_caller_code =
-        isolate->wasm_engine()->code_manager()->LookupCode(it.frame()->pc());
-    auto offset = it.frame()->pc() - wasm_caller_code->instruction_start();
-    caller_ret_offset = static_cast<int32_t>(offset);
-    DCHECK_EQ(offset, caller_ret_offset);
-  }
-
-  //==========================================================================
-  // Begin compilation.
-  //==========================================================================
-  Handle<WasmCompiledModule> compiled_module(
-      target_instance->compiled_module());
-
-  NativeModule* native_module = compiled_module->GetNativeModule();
   DCHECK(!native_module->lazy_compile_frozen());
 
   NativeModuleModificationScope native_module_modification_scope(native_module);
 
-  const wasm::WasmCode* result = nullptr;
-
-  if (!js_to_wasm_caller_code.is_null()) {
-    result = LazyCompileFromJsToWasm(isolate, target_instance,
-                                     js_to_wasm_caller_code, target_func_index);
-    DCHECK_NOT_NULL(result);
-    DCHECK_EQ(target_func_index, result->index());
-  } else {
-    DCHECK_NOT_NULL(wasm_caller_code);
-    if (target_func_index < 0) {
-      result = LazyCompileDirectCall(isolate, target_instance, wasm_caller_code,
-                                     caller_ret_offset);
-      DCHECK_NOT_NULL(result);
-    } else {
-      result =
-          LazyCompileIndirectCall(isolate, target_instance, target_func_index);
-      DCHECK_NOT_NULL(result);
-    }
-  }
-
-  //==========================================================================
-  // Update import and indirect function tables in the caller.
-  //==========================================================================
-  if (indirectly_called) {
-    DCHECK(!caller_instance.is_null());
-    if (!caller_instance->has_managed_indirect_patcher()) {
-      auto patcher = Managed<IndirectPatcher>::Allocate(isolate, 0);
-      caller_instance->set_managed_indirect_patcher(*patcher);
-    }
-    IndirectPatcher* patcher = Managed<IndirectPatcher>::cast(
-                                   caller_instance->managed_indirect_patcher())
-                                   ->raw();
-    Address old_target = lazy_stub->instruction_start();
-    patcher->Patch(caller_instance, target_instance, target_func_index,
-                   old_target, result->instruction_start());
-  }
+  wasm::WasmCode* result =
+      LazyCompileFunction(isolate, native_module, func_index);
+  DCHECK_NOT_NULL(result);
+  DCHECK_EQ(func_index, result->index());
 
   return result->instruction_start();
 }
@@ -881,15 +489,6 @@ void FlushICache(const wasm::NativeModule* native_module) {
   }
 }
 
-void FlushICache(Handle<FixedArray> functions) {
-  for (int i = 0, e = functions->length(); i < e; ++i) {
-    if (!functions->get(i)->IsCode()) continue;
-    Code* code = Code::cast(functions->get(i));
-    Assembler::FlushICache(code->raw_instruction_start(),
-                           code->raw_instruction_size());
-  }
-}
-
 byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
   return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
 }
@@ -1089,24 +688,6 @@ void FinishCompilationUnits(CompilationState* compilation_state,
   }
 }
 
-void UpdateAllCompiledModulesWithTopTierCode(
-    Handle<WasmModuleObject> module_object) {
-  WasmModule* module = module_object->module();
-  DCHECK_GT(module->functions.size() - module->num_imported_functions, 0);
-  USE(module);
-
-  CodeSpaceMemoryModificationScope modification_scope(
-      module_object->GetIsolate()->heap());
-
-  NativeModule* native_module =
-      module_object->compiled_module()->GetNativeModule();
-
-  // Link.
-  CodeSpecialization code_specialization;
-  code_specialization.RelocateDirectCalls(native_module);
-  code_specialization.ApplyToWholeModule(native_module, module_object);
-}
-
 void CompileInParallel(Isolate* isolate, NativeModule* native_module,
                        const ModuleWireBytes& wire_bytes, ModuleEnv* module_env,
                        Handle<WasmModuleObject> module_object,
@@ -1147,53 +728,6 @@ void CompileInParallel(Isolate* isolate, NativeModule* native_module,
   compilation_state->SetNumberOfFunctionsToCompile(functions_count);
   compilation_state->SetWireBytes(wire_bytes);
 
-  DeferredHandles* deferred_handles = nullptr;
-  Handle<WasmModuleObject> module_object_deferred;
-  if (compilation_state->compile_mode() == CompileMode::kTiering) {
-    // Open a deferred handle scope for the module_object, in order to allow
-    // for background tiering compilation.
-    DeferredHandleScope deferred(isolate);
-    module_object_deferred = handle(*module_object, isolate);
-    deferred_handles = deferred.Detach();
-  }
-  compilation_state->AddCallback(
-      [module_object_deferred, deferred_handles](
-          // Callback is called from a foreground thread.
-          CompilationEvent event, ErrorThrower* thrower) mutable {
-        switch (event) {
-          case CompilationEvent::kFinishedBaselineCompilation:
-            // Nothing to do, since we are finishing baseline compilation
-            // in this foreground thread.
-            return;
-          case CompilationEvent::kFinishedTopTierCompilation:
-            UpdateAllCompiledModulesWithTopTierCode(module_object_deferred);
-            // TODO(wasm): Currently compilation has to finish before the
-            // {deferred_handles} can be removed. We need to make sure that
-            // we can clean it up at a time when the native module
-            // should die (but currently cannot, since it's kept alive
-            // through the {deferred_handles} themselves).
-            delete deferred_handles;
-            deferred_handles = nullptr;
-            return;
-          case CompilationEvent::kFailedCompilation:
-            // If baseline compilation failed, we will reflect this without
-            // a callback, in this thread through {thrower}.
-            // Tier-up compilation should not fail if baseline compilation
-            // did not fail.
-            DCHECK(!module_object_deferred->compiled_module()
-                        ->GetNativeModule()
-                        ->compilation_state()
-                        ->baseline_compilation_finished());
-            delete deferred_handles;
-            deferred_handles = nullptr;
-            return;
-          case CompilationEvent::kDestroyed:
-            if (deferred_handles) delete deferred_handles;
-            return;
-        }
-        UNREACHABLE();
-      });
-
   // 1) The main thread allocates a compilation unit for each wasm function
   //    and stores them in the vector {compilation_units} within the
   //    {compilation_state}. By adding units to the {compilation_state}, new
@@ -1813,10 +1347,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
   //--------------------------------------------------------------------------
   CodeSpecialization code_specialization;
   code_specialization.RelocateDirectCalls(native_module);
-  code_specialization.ApplyToWholeModule(native_module, module_object_,
-                                         SKIP_ICACHE_FLUSH);
+  code_specialization.ApplyToWholeModule(native_module, SKIP_ICACHE_FLUSH);
   FlushICache(native_module);
-  FlushICache(handle(module_object_->export_wrappers(), isolate_));
 
   //--------------------------------------------------------------------------
   // Insert the compiled module into the weak list of compiled modules.
@@ -1857,7 +1389,6 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
   //--------------------------------------------------------------------------
   if (module_->start_function_index >= 0) {
     int start_index = module_->start_function_index;
-    Handle<WasmInstanceObject> start_function_instance = instance;
     Address start_call_address =
         static_cast<uint32_t>(start_index) < module_->num_imported_functions
             ? kNullAddress
@@ -1868,7 +1399,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
     // TODO(clemensh): Don't generate an exported function for the start
     // function. Use CWasmEntry instead.
     start_function_ = WasmExportedFunction::New(
-        isolate_, start_function_instance, MaybeHandle<String>(), start_index,
+        isolate_, instance, MaybeHandle<String>(), start_index,
         static_cast<int>(sig->parameter_count()), wrapper_code);
   }
@@ -2121,15 +1652,14 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
   int num_imported_mutable_globals = 0;
 
   DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size());
-  for (int index = 0; index < static_cast<int>(module_->import_table.size());
-       ++index) {
+  int num_imports = static_cast<int>(module_->import_table.size());
+  NativeModule* native_module = instance->compiled_module()->GetNativeModule();
+  for (int index = 0; index < num_imports; ++index) {
     WasmImport& import = module_->import_table[index];
 
     Handle<String> module_name = sanitized_imports_[index].module_name;
     Handle<String> import_name = sanitized_imports_[index].import_name;
     Handle<Object> value = sanitized_imports_[index].value;
-    NativeModule* native_module =
-        instance->compiled_module()->GetNativeModule();
 
     switch (import.kind) {
       case kExternalFunction: {
@@ -2159,8 +1689,8 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
             return -1;
           }
           // The import reference is the instance object itself.
-          ImportedFunctionEntry entry(instance, func_index);
           Address imported_target = imported_function->GetWasmCallTarget();
+          ImportedFunctionEntry entry(instance, func_index);
           entry.set_wasm_to_wasm(*imported_instance, imported_target);
         } else {
           // The imported function is a callable.
@@ -3130,17 +2660,17 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
             }
             return;
           case CompilationEvent::kFinishedTopTierCompilation:
-            // It is only safe to schedule the UpdateToTopTierCompiledCode
-            // step if no foreground task is currently pending, and no
-            // finisher is outstanding (streaming compilation).
+            // It is only safe to remove the AsyncCompileJob if no
+            // foreground task is currently pending, and no finisher is
+            // outstanding (streaming compilation).
             if (job->num_pending_foreground_tasks_ == 0 &&
                 job->outstanding_finishers_.Value() == 0) {
-              job->DoSync<UpdateToTopTierCompiledCode>();
-            }
-            // If a foreground task was pending or a finsher was pending,
-            // we will rely on FinishModule to switch the step to
-            // UpdateToTopTierCompiledCode.
-            job->tiering_completed_ = true;
+              job->isolate_->wasm_engine()->RemoveCompileJob(job);
+            } else {
+              // If a foreground task was pending or a finsher was pending,
+              // we will rely on FinishModule to remove the job.
+              job->tiering_completed_ = true;
+            }
             return;
           case CompilationEvent::kFailedCompilation: {
             // Tier-up compilation should not fail if baseline compilation
@@ -3242,21 +2772,9 @@ class AsyncCompileJob::FinishModule : public CompileStep {
                   ->compilation_state()
                   ->compile_mode());
     if (job_->tiering_completed_) {
-      job_->DoSync<UpdateToTopTierCompiledCode>();
-    }
-  }
-};
-
-//==========================================================================
-// Step 7 (sync): Update with top tier code.
-//==========================================================================
-class AsyncCompileJob::UpdateToTopTierCompiledCode : public CompileStep {
-  void RunInForeground() override {
-    TRACE_COMPILE("(7) Update native module to use optimized code...\n");
-
-    UpdateAllCompiledModulesWithTopTierCode(job_->module_object_);
-    job_->isolate_->wasm_engine()->RemoveCompileJob(job_);
+      job_->isolate_->wasm_engine()->RemoveCompileJob(job_);
+    }
   }
 };
 
 class AsyncCompileJob::AbortCompilation : public CompileStep {

View File

@@ -65,15 +65,8 @@ V8_EXPORT_PRIVATE Handle<Script> CreateWasmScript(
     Isolate* isolate, const ModuleWireBytes& wire_bytes);
 
 // Triggered by the WasmCompileLazy builtin.
-// Walks the stack (top three frames) to determine the wasm instance involved
-// and which function to compile.
-// Then triggers WasmCompiledModule::CompileLazy, taking care of correctly
-// patching the call site or indirect function tables.
-// Returns either the Code object that has been lazily compiled, or Illegal if
-// an error occurred. In the latter case, a pending exception has been set,
-// which will be triggered when returning from the runtime function, i.e. the
-// Illegal builtin will never be called.
-Address CompileLazy(Isolate* isolate, Handle<WasmInstanceObject> instance);
+// Returns the instruction start of the compiled code object.
+Address CompileLazy(Isolate*, NativeModule*, uint32_t func_index);
 
 // Encapsulates all the state and steps of an asynchronous compilation.
 // An asynchronous compile job consists of a number of tasks that are executed

View File

@@ -51,20 +51,6 @@ constexpr bool kModuleCanAllocateMoreMemory = true;
 
 constexpr bool kNeedsTrampoline = !kModuleCanAllocateMoreMemory;
 
-void RelocateCode(WasmCode* code, const WasmCode* orig,
-                  WasmCode::FlushICache flush_icache) {
-  intptr_t delta = code->instruction_start() - orig->instruction_start();
-  for (RelocIterator it(code->instructions(), code->reloc_info(),
-                        code->constant_pool(), RelocInfo::kApplyMask);
-       !it.done(); it.next()) {
-    it.rinfo()->apply(delta);
-  }
-  if (flush_icache) {
-    Assembler::FlushICache(code->instructions().start(),
-                           code->instructions().size());
-  }
-}
-
 }  // namespace
 
 void DisjointAllocationPool::Merge(AddressRange range) {
@@ -296,6 +282,8 @@ const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
       return "interpreter entry";
     case WasmCode::kTrampoline:
       return "trampoline";
+    case WasmCode::kJumpTable:
+      return "jump table";
   }
   return "unknown kind";
 }
@@ -316,26 +304,30 @@ WasmCode::~WasmCode() {
 base::AtomicNumber<size_t> NativeModule::next_id_;
 
 NativeModule::NativeModule(Isolate* isolate, uint32_t num_functions,
-                           uint32_t num_imports, bool can_request_more,
-                           VirtualMemory* code_space,
+                           uint32_t num_imported_functions,
+                           bool can_request_more, VirtualMemory* code_space,
                            WasmCodeManager* code_manager, ModuleEnv& env)
     : instance_id(next_id_.Increment(1)),
      num_functions_(num_functions),
-      num_imported_functions_(num_imports),
+      num_imported_functions_(num_imported_functions),
       compilation_state_(NewCompilationState(isolate, env)),
      free_code_space_({code_space->address(), code_space->end()}),
      wasm_code_manager_(code_manager),
      can_request_more_memory_(can_request_more),
      use_trap_handler_(env.use_trap_handler) {
-  if (num_functions > 0) {
-    uint32_t num_wasm_functions = num_functions - num_imports;
-    code_table_.reset(new WasmCode*[num_wasm_functions]);
-    memset(code_table_.get(), 0, num_wasm_functions * sizeof(WasmCode*));
-  }
   VirtualMemory my_mem;
   owned_code_space_.push_back(my_mem);
   owned_code_space_.back().TakeControl(code_space);
   owned_code_.reserve(num_functions);
+
+  DCHECK_LE(num_imported_functions, num_functions);
+  uint32_t num_wasm_functions = num_functions - num_imported_functions;
+  if (num_wasm_functions > 0) {
+    code_table_.reset(new WasmCode*[num_wasm_functions]);
+    memset(code_table_.get(), 0, num_wasm_functions * sizeof(WasmCode*));
+
+    jump_table_ = CreateEmptyJumpTable(num_wasm_functions);
+  }
 }
 
 void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
@@ -346,6 +338,9 @@ void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
   memset(new_table, 0, max_wasm * sizeof(*new_table));
   memcpy(new_table, code_table_.get(), num_wasm * sizeof(*new_table));
   code_table_.reset(new_table);
+
+  // Re-allocate jump table.
+  jump_table_ = CreateEmptyJumpTable(max_wasm);
 }
 
 void NativeModule::SetNumFunctionsForTesting(uint32_t num_functions) {
@@ -402,6 +397,7 @@ WasmCode* NativeModule::AddOwnedCode(
       std::upper_bound(owned_code_.begin(), owned_code_.end(),
                        ret->instruction_start(), WasmCodeUniquePtrComparator());
   owned_code_.insert(insert_before, std::move(code));
+
   if (flush_icache) {
     Assembler::FlushICache(ret->instructions().start(),
                            ret->instructions().size());
@@ -422,14 +418,29 @@ WasmCode* NativeModule::AddCodeCopy(Handle<Code> code, WasmCode::Kind kind,
 WasmCode* NativeModule::AddInterpreterEntry(Handle<Code> code, uint32_t index) {
   WasmCode* ret = AddAnonymousCode(code, WasmCode::kInterpreterEntry);
   ret->index_ = Just(index);
+  PatchJumpTable(index, ret->instruction_start(), WasmCode::kFlushICache);
   return ret;
 }
 
 void NativeModule::SetLazyBuiltin(Handle<Code> code) {
+  uint32_t num_wasm_functions = num_functions_ - num_imported_functions_;
+  if (num_wasm_functions == 0) return;
   WasmCode* lazy_builtin = AddAnonymousCode(code, WasmCode::kLazyStub);
-  for (WasmCode*& code_table_entry : code_table()) {
-    code_table_entry = lazy_builtin;
+  // Fill the jump table with jumps to the lazy compile stub.
+  Address lazy_compile_target = lazy_builtin->instruction_start();
+  JumpTableAssembler jtasm(
+      jump_table_->instruction_start(),
+      static_cast<int>(jump_table_->instructions().size()) + 256);
+  for (uint32_t i = 0; i < num_wasm_functions; ++i) {
+    // Check that the offset in the jump table increases as expected.
+    DCHECK_EQ(i * JumpTableAssembler::kJumpTableSlotSize, jtasm.pc_offset());
+    jtasm.EmitLazyCompileJumpSlot(i + num_imported_functions_,
+                                  lazy_compile_target);
+    jtasm.NopBytes((i + 1) * JumpTableAssembler::kJumpTableSlotSize -
+                   jtasm.pc_offset());
   }
+  Assembler::FlushICache(jump_table_->instructions().start(),
+                         jump_table_->instructions().size());
 }
 
 void NativeModule::SetRuntimeStubs(Isolate* isolate) {
@@ -591,10 +602,11 @@ WasmCode* NativeModule::AddCode(
     }
   }
 
-  set_code(index, ret);
   if (use_trap_handler_) {
     ret->RegisterTrapHandlerData();
   }
+  set_code(index, ret);
+  PatchJumpTable(index, ret->instruction_start(), WasmCode::kFlushICache);
 
   // Flush the i-cache here instead of in AddOwnedCode, to include the changes
   // made while iterating over the RelocInfo above.
@@ -639,6 +651,38 @@ Address NativeModule::CreateTrampolineTo(Handle<Code> code) {
   return ret;
 }
 
+WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t num_wasm_functions) {
+  // Only call this if we really need a jump table.
+  DCHECK_LT(0, num_wasm_functions);
+  size_t jump_table_size =
+      num_wasm_functions * JumpTableAssembler::kJumpTableSlotSize;
+  std::unique_ptr<byte[]> instructions(new byte[jump_table_size]);
+  memset(instructions.get(), 0, jump_table_size);
+  return AddOwnedCode({instructions.get(), jump_table_size},  // instructions
+                      nullptr,                                // reloc_info
+                      0,                                      // reloc_size
+                      nullptr,                                // source_pos
+                      0,                          // source_pos_size
+                      Nothing<uint32_t>(),        // index
+                      WasmCode::kJumpTable,       // kind
+                      0,                          // constant_pool_offset
+                      0,                          // stack_slots
+                      0,                          // safepoint_table_offset
+                      0,                          // handler_table_offset
+                      {},                         // protected_instructions
+                      WasmCode::kOther,           // tier
+                      WasmCode::kNoFlushICache);  // flush_icache
+}
+
+void NativeModule::PatchJumpTable(uint32_t func_index, Address target,
+                                  WasmCode::FlushICache flush_icache) {
+  DCHECK_LE(num_imported_functions_, func_index);
+  uint32_t slot_idx = func_index - num_imported_functions_;
+  Address jump_table_slot = jump_table_->instruction_start() +
+                            slot_idx * JumpTableAssembler::kJumpTableSlotSize;
+  JumpTableAssembler::PatchJumpTableSlot(jump_table_slot, target, flush_icache);
+}
+
 Address NativeModule::GetLocalAddressFor(Handle<Code> code) {
   DCHECK(Heap::IsImmovable(*code));
@@ -733,7 +777,7 @@ Address NativeModule::AllocateForCode(size_t size) {
   return mem.start;
 }
 
-WasmCode* NativeModule::Lookup(Address pc) {
+WasmCode* NativeModule::Lookup(Address pc) const {
   if (owned_code_.empty()) return nullptr;
   auto iter = std::upper_bound(owned_code_.begin(), owned_code_.end(), pc,
                                WasmCodeUniquePtrComparator());
@ -744,66 +788,27 @@ WasmCode* NativeModule::Lookup(Address pc) {
return candidate->contains(pc) ? candidate : nullptr; return candidate->contains(pc) ? candidate : nullptr;
} }
-Address NativeModule::GetCallTargetForFunction(uint32_t func_index) {
-  // TODO(clemensh): Introduce a jump table and return a slot of it here.
-  WasmCode* wasm_code = code(func_index);
-  if (!wasm_code) return kNullAddress;
-  if (wasm_code->kind() != WasmCode::kLazyStub) {
-    return wasm_code->instruction_start();
-  }
-  DCHECK_IMPLIES(func_index < num_imported_functions_,
-                 !wasm_code->IsAnonymous());
-  if (!wasm_code->IsAnonymous()) {
-    // If the function wasn't imported, its index should match.
-    DCHECK_IMPLIES(func_index >= num_imported_functions_,
-                   func_index == wasm_code->index());
-    return wasm_code->instruction_start();
-  }
-  if (lazy_compile_stubs_ == nullptr) {
-    lazy_compile_stubs_.reset(new WasmCode*[num_functions_]);
-    memset(lazy_compile_stubs_.get(), 0, num_functions_ * sizeof(WasmCode*));
-  }
-  WasmCode* cloned_code = lazy_compile_stubs_[func_index];
-  if (cloned_code == nullptr) {
-    cloned_code = CloneCode(wasm_code, WasmCode::kNoFlushICache);
-    RelocateCode(cloned_code, wasm_code, WasmCode::kFlushICache);
-    cloned_code->index_ = Just(func_index);
-    lazy_compile_stubs_[func_index] = cloned_code;
-  }
-  DCHECK_EQ(func_index, cloned_code->index());
-  return cloned_code->instruction_start();
-}
-
-WasmCode* NativeModule::CloneCode(const WasmCode* original_code,
-                                  WasmCode::FlushICache flush_icache) {
-  std::unique_ptr<byte[]> reloc_info;
-  if (original_code->reloc_info().size() > 0) {
-    reloc_info.reset(new byte[original_code->reloc_info().size()]);
-    memcpy(reloc_info.get(), original_code->reloc_info().start(),
-           original_code->reloc_info().size());
-  }
-  std::unique_ptr<byte[]> source_pos;
-  if (original_code->source_positions().size() > 0) {
-    source_pos.reset(new byte[original_code->source_positions().size()]);
-    memcpy(source_pos.get(), original_code->source_positions().start(),
-           original_code->source_positions().size());
-  }
-  DCHECK_EQ(0, original_code->protected_instructions().size());
-  std::unique_ptr<ProtectedInstructions> protected_instructions(
-      new ProtectedInstructions(0));
-  WasmCode* ret = AddOwnedCode(
-      original_code->instructions(), std::move(reloc_info),
-      original_code->reloc_info().size(), std::move(source_pos),
-      original_code->source_positions().size(), original_code->index_,
-      original_code->kind(), original_code->constant_pool_offset_,
-      original_code->stack_slots(), original_code->safepoint_table_offset_,
-      original_code->handler_table_offset_, std::move(protected_instructions),
-      original_code->tier(), flush_icache);
-  if (!ret->IsAnonymous()) {
-    set_code(ret->index(), ret);
-  }
-  return ret;
-}
+Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
+  // TODO(clemensh): Measure performance win of returning instruction start
+  // directly if we have turbofan code. Downside: Redirecting functions (e.g.
+  // for debugging) gets much harder.
+
+  // Return the jump table slot for that function index.
+  DCHECK_NOT_NULL(jump_table_);
+  uint32_t slot_idx = func_index - num_imported_functions_;
+  DCHECK_LT(slot_idx, jump_table_->instructions().size() /
+                          JumpTableAssembler::kJumpTableSlotSize);
+  return jump_table_->instruction_start() +
+         slot_idx * JumpTableAssembler::kJumpTableSlotSize;
+}
+
+uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot(Address slot_address) {
+  DCHECK(is_jump_table_slot(slot_address));
+  uint32_t offset =
+      static_cast<uint32_t>(slot_address - jump_table_->instruction_start());
+  uint32_t slot_idx = offset / JumpTableAssembler::kJumpTableSlotSize;
+  DCHECK_LT(slot_idx, num_functions_ - num_imported_functions_);
+  return num_imported_functions_ + slot_idx;
+}
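GetCallTargetForFunction and GetFunctionIndexFromJumpTableSlot are inverses of each other on the non-imported index range. A property-style sketch (not a test from this CL; nm is assumed to be a live NativeModule*):

for (uint32_t i = nm->num_imported_functions(); i < nm->num_functions(); ++i) {
  Address slot = nm->GetCallTargetForFunction(i);
  CHECK(nm->is_jump_table_slot(slot));
  CHECK_EQ(i, nm->GetFunctionIndexFromJumpTableSlot(slot));
}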
 void NativeModule::DisableTrapHandler() {

@@ -888,16 +893,22 @@ void WasmCodeManager::TryAllocate(size_t size, VirtualMemory* ret, void* hint) {
              reinterpret_cast<void*>(ret->end()), ret->size());
 }
+// static
 size_t WasmCodeManager::EstimateNativeModuleSize(const WasmModule* module) {
   constexpr size_t kCodeSizeMultiplier = 4;
   constexpr size_t kImportSize = 32 * kPointerSize;
+  uint32_t num_functions = static_cast<uint32_t>(module->functions.size());
+  uint32_t num_wasm_functions = num_functions - module->num_imported_functions;
   size_t estimate =
       AllocatePageSize() /* TODO(titzer): 1 page spot bonus */ +
       sizeof(NativeModule) +
-      (sizeof(WasmCode*) * module->functions.size() /* code table size */) +
-      (sizeof(WasmCode) * module->functions.size() /* code object size */) +
-      (kImportSize * module->num_imported_functions /* import size */);
+      (sizeof(WasmCode*) * num_wasm_functions /* code table size */) +
+      (sizeof(WasmCode) * num_wasm_functions /* code object size */) +
+      (kImportSize * module->num_imported_functions /* import size */) +
+      (JumpTableAssembler::kJumpTableSlotSize *
+       num_wasm_functions /* jump table size */);

   for (auto& function : module->functions) {
     estimate += kCodeSizeMultiplier * function.code.length();
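Back-of-envelope instance of the new estimate (module numbers and the slot size are assumed; sizeof values left symbolic):

// 2 imports, 10 wasm functions, hypothetical 16-byte jump table slots:
//   code table:   10 * sizeof(WasmCode*)
//   code objects: 10 * sizeof(WasmCode)
//   imports:       2 * 32 * kPointerSize
//   jump table:   10 * 16 = 160 bytes
// plus one allocator page, sizeof(NativeModule), and the per-function
// kCodeSizeMultiplier * body-size terms from the loop above.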
@@ -1032,7 +1043,7 @@ WasmCode* WasmCodeManager::GetCodeFromStartAddress(Address pc) const {
   return code;
 }

-WasmCode* WasmCodeManager::LookupCode(Address pc) const {
+NativeModule* WasmCodeManager::LookupNativeModule(Address pc) const {
   if (lookup_map_.empty()) return nullptr;
   auto iter = lookup_map_.upper_bound(pc);

@@ -1043,8 +1054,12 @@ WasmCode* WasmCodeManager::LookupCode(Address pc) const {
   NativeModule* candidate = iter->second.second;
   DCHECK_NOT_NULL(candidate);
-  if (range_start <= pc && pc < range_end) return candidate->Lookup(pc);
-  return nullptr;
+  return range_start <= pc && pc < range_end ? candidate : nullptr;
+}
+
+WasmCode* WasmCodeManager::LookupCode(Address pc) const {
+  NativeModule* candidate = LookupNativeModule(pc);
+  return candidate ? candidate->Lookup(pc) : nullptr;
 }

 void WasmCodeManager::Free(VirtualMemory* mem) {
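Splitting LookupNativeModule out of LookupCode lets a caller resolve the owning module once and then decide how to interpret the address; the interpreter change later in this diff uses exactly this shape. Usage sketch (pc is some hypothetical code address):

NativeModule* native_module = code_manager->LookupNativeModule(pc);
if (native_module == nullptr) return;  // not a wasm address
WasmCode* code =
    native_module->is_jump_table_slot(pc)
        ? native_module->code(
              native_module->GetFunctionIndexFromJumpTableSlot(pc))
        : native_module->Lookup(pc);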

View File

@@ -94,7 +94,8 @@ class V8_EXPORT_PRIVATE WasmCode final {
     kLazyStub,
     kRuntimeStub,
     kInterpreterEntry,
-    kTrampoline
+    kTrampoline,
+    kJumpTable
   };

   // Each runtime stub is identified by an id. This id is used to reference the

@@ -251,10 +252,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
   WasmCode* AddInterpreterEntry(Handle<Code> code, uint32_t index);

   // When starting lazy compilation, provide the WasmLazyCompile builtin by
-  // calling SetLazyBuiltin. It will initialize the code table with it. Copies
-  // of it might be cloned from them later when creating entries for exported
-  // functions and indirect callable functions, so that they may be identified
-  // by the runtime.
+  // calling SetLazyBuiltin. It will be copied into this NativeModule and the
+  // jump table will be populated with that copy.
   void SetLazyBuiltin(Handle<Code> code);

   // Initializes all runtime stubs by copying them over from the JS-allocated
@@ -282,6 +281,12 @@ class V8_EXPORT_PRIVATE NativeModule final {
     return code;
   }

+  bool is_jump_table_slot(Address address) const {
+    return jump_table_->contains(address);
+  }
+
+  uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address);
+
   // Transition this module from code relying on trap handlers (i.e. without
   // explicit memory bounds checks) to code that does not require trap handlers
   // (i.e. code with explicit bounds checks).

@@ -290,11 +295,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
   // after calling this method.
   void DisableTrapHandler();

-  // Returns the instruction start of code suitable for indirect or import
-  // calls for the given function index. If the code at the given index is the
-  // lazy compile stub, it will clone a non-anonymous lazy compile stub for the
-  // purpose. This will soon change to always return a jump table slot.
-  Address GetCallTargetForFunction(uint32_t index);
+  // Returns the target to call for the given function (returns a jump table
+  // slot within {jump_table_}).
+  Address GetCallTargetForFunction(uint32_t func_index) const;

   bool SetExecutable(bool executable);
@@ -322,6 +325,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
   void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
   bool lazy_compile_frozen() const { return lazy_compile_frozen_; }

+  WasmCode* Lookup(Address) const;
+
   const size_t instance_id = 0;
   ~NativeModule();

@@ -333,9 +338,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
   friend class NativeModuleModificationScope;

   static base::AtomicNumber<size_t> next_id_;
-  NativeModule(Isolate* isolate, uint32_t num_functions, uint32_t num_imports,
-               bool can_request_more, VirtualMemory* code_space,
-               WasmCodeManager* code_manager, ModuleEnv& env);
+  NativeModule(Isolate* isolate, uint32_t num_functions,
+               uint32_t num_imported_functions, bool can_request_more,
+               VirtualMemory* code_space, WasmCodeManager* code_manager,
+               ModuleEnv& env);

   WasmCode* AddAnonymousCode(Handle<Code>, WasmCode::Kind kind);
   Address AllocateForCode(size_t size);
@@ -354,13 +360,16 @@ class V8_EXPORT_PRIVATE NativeModule final {
                         size_t handler_table_offset,
                         std::unique_ptr<ProtectedInstructions>, WasmCode::Tier,
                         WasmCode::FlushICache);
-  WasmCode* CloneCode(const WasmCode*, WasmCode::FlushICache);
-  WasmCode* Lookup(Address);
   Address GetLocalAddressFor(Handle<Code>);
   Address CreateTrampolineTo(Handle<Code>);
   // TODO(7424): Only used for debugging in {WasmCode::Validate}. Remove.
   Code* ReverseTrampolineLookup(Address target);
+  WasmCode* CreateEmptyJumpTable(uint32_t num_wasm_functions);
+  void PatchJumpTable(uint32_t func_index, Address target,
+                      WasmCode::FlushICache);

   void set_code(uint32_t index, WasmCode* code) {
     DCHECK_LT(index, num_functions_);
     DCHECK_LE(num_imported_functions_, index);
@@ -375,7 +384,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
   uint32_t num_functions_;
   uint32_t num_imported_functions_;
   std::unique_ptr<WasmCode* []> code_table_;
-  std::unique_ptr<WasmCode* []> lazy_compile_stubs_;

   WasmCode* runtime_stub_table_[WasmCode::kRuntimeStubCount] = {nullptr};

@@ -383,6 +391,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
   // start of the trampoline.
   std::unordered_map<Address, Address> trampolines_;

+  // Jump table used to easily redirect wasm function calls.
+  WasmCode* jump_table_ = nullptr;
+
   std::unique_ptr<CompilationState, CompilationStateDeleter> compilation_state_;

   // A phantom reference to the {WasmModuleObject}. It is intentionally not
@@ -423,6 +434,7 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
       Isolate* isolate, size_t memory_estimate, uint32_t num_functions,
       uint32_t num_imported_functions, bool can_request_more, ModuleEnv& env);

+  NativeModule* LookupNativeModule(Address pc) const;
   WasmCode* LookupCode(Address pc) const;
   WasmCode* GetCodeFromStartAddress(Address pc) const;
   size_t remaining_uncommitted_code_space() const;

View File

@@ -68,62 +68,19 @@ void CodeSpecialization::RelocateDirectCalls(NativeModule* native_module) {
   relocate_direct_calls_module_ = native_module;
 }

-bool CodeSpecialization::ApplyToWholeModule(
-    NativeModule* native_module, Handle<WasmModuleObject> module_object,
-    ICacheFlushMode icache_flush_mode) {
+bool CodeSpecialization::ApplyToWholeModule(NativeModule* native_module,
+                                            ICacheFlushMode icache_flush_mode) {
   DisallowHeapAllocation no_gc;
-  WasmModule* module = module_object->module();
-  std::vector<WasmFunction>* wasm_functions =
-      &module_object->module()->functions;
-  FixedArray* export_wrappers = module_object->export_wrappers();
-  DCHECK_EQ(export_wrappers->length(), module->num_exported_functions);
   bool changed = false;
-  int func_index = module->num_imported_functions;

   // Patch all wasm functions.
-  for (int num_wasm_functions = static_cast<int>(wasm_functions->size());
-       func_index < num_wasm_functions; ++func_index) {
-    WasmCode* wasm_function = native_module->code(func_index);
-    // TODO(clemensh): Get rid of this nullptr check
-    if (wasm_function == nullptr ||
-        wasm_function->kind() != WasmCode::kFunction) {
-      continue;
-    }
-    changed |= ApplyToWasmCode(wasm_function, icache_flush_mode);
+  for (WasmCode* wasm_code : native_module->code_table()) {
+    if (wasm_code == nullptr) continue;
+    if (wasm_code->kind() != WasmCode::kFunction) continue;
+    changed |= ApplyToWasmCode(wasm_code, icache_flush_mode);
   }

-  // Patch all exported functions (JS_TO_WASM_FUNCTION).
-  int reloc_mode = 0;
-  // Patch CODE_TARGET if we shall relocate direct calls. If we patch direct
-  // calls, the instance registered for that (relocate_direct_calls_module_)
-  // should match the instance we currently patch (instance).
-  if (relocate_direct_calls_module_ != nullptr) {
-    DCHECK_EQ(native_module, relocate_direct_calls_module_);
-    reloc_mode |= RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL);
-  }
-  if (!reloc_mode) return changed;
-  int wrapper_index = 0;
-  for (auto exp : module->export_table) {
-    if (exp.kind != kExternalFunction) continue;
-    Code* export_wrapper = Code::cast(export_wrappers->get(wrapper_index++));
-    if (export_wrapper->kind() != Code::JS_TO_WASM_FUNCTION) continue;
-    for (RelocIterator it(export_wrapper, reloc_mode); !it.done(); it.next()) {
-      RelocInfo::Mode mode = it.rinfo()->rmode();
-      switch (mode) {
-        case RelocInfo::JS_TO_WASM_CALL: {
-          changed = true;
-          Address new_target =
-              native_module->GetCallTargetForFunction(exp.index);
-          it.rinfo()->set_js_to_wasm_address(new_target, icache_flush_mode);
-        } break;
-        default:
-          UNREACHABLE();
-      }
-    }
-  }
-  DCHECK_EQ(module->functions.size(), func_index);
-  DCHECK_EQ(export_wrappers->length(), wrapper_index);
   return changed;
 }
@@ -167,9 +124,9 @@ bool CodeSpecialization::ApplyToWasmCode(wasm::WasmCode* code,
         uint32_t called_func_index = ExtractDirectCallIndex(
             patch_direct_calls_helper->decoder,
             patch_direct_calls_helper->func_bytes + byte_pos);
-        const WasmCode* new_code = native_module->code(called_func_index);
-        it.rinfo()->set_wasm_call_address(new_code->instruction_start(),
-                                          icache_flush_mode);
+        Address new_target =
+            native_module->GetCallTargetForFunction(called_func_index);
+        it.rinfo()->set_wasm_call_address(new_target, icache_flush_mode);
         changed = true;
       } break;

View File

@@ -29,9 +29,8 @@ class CodeSpecialization {
   // Update all direct call sites based on the code table in the given module.
   void RelocateDirectCalls(NativeModule* module);
-  // Apply all relocations and patching to all code in the module (i.e. wasm
-  // code and exported function wrapper code).
-  bool ApplyToWholeModule(NativeModule*, Handle<WasmModuleObject>,
+  // Apply all relocations and patching to all code in the module.
+  bool ApplyToWholeModule(NativeModule*,
                           ICacheFlushMode = FLUSH_ICACHE_IF_NEEDED);
   // Apply all relocations and patching to one wasm code object.
   bool ApplyToWasmCode(wasm::WasmCode*,

View File

@@ -568,56 +568,6 @@ Handle<FixedArray> GetOrCreateInterpretedFunctions(
   return new_arr;
 }

-using CodeRelocationMap = std::map<Address, Address>;
-
-void RedirectCallsitesInCode(Isolate* isolate, const wasm::WasmCode* code,
-                             CodeRelocationMap* map) {
-  DisallowHeapAllocation no_gc;
-  for (RelocIterator it(code->instructions(), code->reloc_info(),
-                        code->constant_pool(),
-                        RelocInfo::ModeMask(RelocInfo::WASM_CALL));
-       !it.done(); it.next()) {
-    Address target = it.rinfo()->target_address();
-    auto new_target = map->find(target);
-    if (new_target == map->end()) continue;
-    it.rinfo()->set_wasm_call_address(new_target->second);
-  }
-}
-
-void RedirectCallsitesInJSWrapperCode(Isolate* isolate, Code* code,
-                                      CodeRelocationMap* map) {
-  DisallowHeapAllocation no_gc;
-  for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
-       !it.done(); it.next()) {
-    Address target = it.rinfo()->js_to_wasm_address();
-    auto new_target = map->find(target);
-    if (new_target == map->end()) continue;
-    it.rinfo()->set_js_to_wasm_address(new_target->second);
-  }
-}
-
-void RedirectCallsitesInInstance(Isolate* isolate, WasmInstanceObject* instance,
-                                 CodeRelocationMap* map) {
-  DisallowHeapAllocation no_gc;
-  // Redirect all calls in wasm functions.
-  wasm::NativeModule* native_module =
-      instance->compiled_module()->GetNativeModule();
-  for (uint32_t i = native_module->num_imported_functions(),
-                e = native_module->num_functions();
-       i < e; ++i) {
-    wasm::WasmCode* code = native_module->code(i);
-    RedirectCallsitesInCode(isolate, code, map);
-  }
-  // TODO(6668): Find instances that imported our code and also patch those.
-  // Redirect all calls in exported functions.
-  FixedArray* export_wrapper = instance->module_object()->export_wrappers();
-  for (int i = 0, e = export_wrapper->length(); i != e; ++i) {
-    Code* code = Code::cast(export_wrapper->get(i));
-    RedirectCallsitesInJSWrapperCode(isolate, code, map);
-  }
-}
-
 }  // namespace

 Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<WasmInstanceObject> instance) {
@@ -663,7 +613,6 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
   wasm::NativeModule* native_module =
       instance->compiled_module()->GetNativeModule();
   wasm::WasmModule* module = instance->module();
-  CodeRelocationMap code_to_relocate;

   // We may modify js wrappers, as well as wasm functions. Hence the 2
   // modification scopes.

@@ -680,16 +629,10 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
         isolate, func_index, module->functions[func_index].sig);
     const wasm::WasmCode* wasm_new_code = native_module->AddInterpreterEntry(
         new_code.ToHandleChecked(), func_index);
-    const wasm::WasmCode* old_code =
-        native_module->code(static_cast<uint32_t>(func_index));
     Handle<Foreign> foreign_holder = isolate->factory()->NewForeign(
         wasm_new_code->instruction_start(), TENURED);
     interpreted_functions->set(func_index, *foreign_holder);
-    DCHECK_EQ(0, code_to_relocate.count(old_code->instruction_start()));
-    code_to_relocate.insert(std::make_pair(old_code->instruction_start(),
-                                           wasm_new_code->instruction_start()));
   }
-  RedirectCallsitesInInstance(isolate, *instance, &code_to_relocate);
 }

 void WasmDebugInfo::PrepareStep(StepAction step_action) {

View File

@@ -2676,18 +2676,23 @@ class ThreadImpl {
       return {ExternalCallResult::INVALID_FUNC};
     }

-    WasmCode* code;
-    Handle<WasmInstanceObject> instance;
-    {
     IndirectFunctionTableEntry entry(instance_object_, entry_index);
     // Signature check.
     if (entry.sig_id() != static_cast<int32_t>(expected_sig_id)) {
       return {ExternalCallResult::SIGNATURE_MISMATCH};
     }

-    instance = handle(entry.instance(), isolate);
-    code = isolate->wasm_engine()->code_manager()->GetCodeFromStartAddress(
-        entry.target());
+    Handle<WasmInstanceObject> instance = handle(entry.instance(), isolate);
+    Address target = entry.target();
+    NativeModule* native_module =
+        isolate->wasm_engine()->code_manager()->LookupNativeModule(target);
+    WasmCode* code;
+    if (native_module->is_jump_table_slot(target)) {
+      uint32_t func_index =
+          native_module->GetFunctionIndexFromJumpTableSlot(target);
+      code = native_module->code(func_index);
+    } else {
+      code = native_module->Lookup(target);
     }

     // Call either an internal or external WASM function.

View File

@@ -169,8 +169,6 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_table_instances,
                    FixedArray, kIndirectFunctionTableInstancesOffset)
 OPTIONAL_ACCESSORS(WasmInstanceObject, managed_native_allocations, Foreign,
                    kManagedNativeAllocationsOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, managed_indirect_patcher, Foreign,
-                   kManagedIndirectPatcherOffset)
 ACCESSORS(WasmInstanceObject, undefined_value, Oddball, kUndefinedValueOffset)
 ACCESSORS(WasmInstanceObject, null_value, Oddball, kNullValueOffset)
 ACCESSORS(WasmInstanceObject, centry_stub, Code, kCEntryStubOffset)

View File

@@ -78,7 +78,7 @@ class IndirectFunctionTableEntry {
 //    - target = pointer to wasm-to-js wrapper code entrypoint
 //  - an imported wasm function from another instance, which has fields
 //    - instance = target instance
-//    - target = entrypoint to wasm code of the function
+//    - target = entrypoint for the function
 class ImportedFunctionEntry {
  public:
  inline ImportedFunctionEntry(Handle<WasmInstanceObject>, int index);

@@ -388,7 +388,6 @@ class WasmInstanceObject : public JSObject {
   DECL_ACCESSORS(imported_function_callables, FixedArray)
   DECL_OPTIONAL_ACCESSORS(indirect_function_table_instances, FixedArray)
   DECL_OPTIONAL_ACCESSORS(managed_native_allocations, Foreign)
-  DECL_OPTIONAL_ACCESSORS(managed_indirect_patcher, Foreign)
   DECL_ACCESSORS(undefined_value, Oddball)
   DECL_ACCESSORS(null_value, Oddball)
   DECL_ACCESSORS(centry_stub, Code)

@@ -423,7 +422,6 @@ class WasmInstanceObject : public JSObject {
   V(kImportedFunctionCallablesOffset, kPointerSize)      \
   V(kIndirectFunctionTableInstancesOffset, kPointerSize) \
   V(kManagedNativeAllocationsOffset, kPointerSize)       \
-  V(kManagedIndirectPatcherOffset, kPointerSize)         \
   V(kUndefinedValueOffset, kPointerSize)                 \
   V(kNullValueOffset, kPointerSize)                      \
   V(kCEntryStubOffset, kPointerSize)                     \

View File

@@ -268,7 +268,7 @@ NativeModuleSerializer::NativeModuleSerializer(Isolate* isolate,
 }

 size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
-  if (code->kind() == WasmCode::kLazyStub) return sizeof(size_t);
+  if (code == nullptr) return sizeof(size_t);
   DCHECK_EQ(WasmCode::kFunction, code->kind());
   return kCodeHeaderSize + code->instructions().size() +
          code->reloc_info().size() + code->source_positions().size() +

@@ -290,7 +290,7 @@ void NativeModuleSerializer::WriteHeader(Writer* writer) {
 }

 void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
-  if (code->kind() == WasmCode::kLazyStub) {
+  if (code == nullptr) {
     writer->Write(size_t{0});
     return;
   }

@@ -500,6 +500,8 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
       handler_table_offset, std::move(protected_instructions), tier,
       WasmCode::kNoFlushICache);
   native_module_->set_code(fn_index, ret);
+  native_module_->PatchJumpTable(fn_index, ret->instruction_start(),
+                                 WasmCode::kFlushICache);

   // Relocate the code.
   int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |

View File

@@ -119,9 +119,9 @@ uint32_t TestingModuleBuilder::AddFunction(FunctionSig* sig, const char* name) {
 Handle<JSFunction> TestingModuleBuilder::WrapCode(uint32_t index) {
   // Wrap the code so it can be called as a JS function.
   Link();
-  wasm::WasmCode* code = native_module_->code(index);
+  Address target = native_module_->GetCallTargetForFunction(index);
   MaybeHandle<Code> maybe_ret_code = compiler::CompileJSToWasmWrapper(
-      isolate_, test_module_ptr_, code->instruction_start(), index,
+      isolate_, test_module_ptr_, target, index,
       trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler : kNoTrapHandler);
   Handle<Code> ret_code = maybe_ret_code.ToHandleChecked();
   Handle<JSFunction> ret = WasmExportedFunction::New(

@@ -167,9 +167,9 @@ void TestingModuleBuilder::PopulateIndirectFunctionTable() {
     for (int j = 0; j < table_size; j++) {
       WasmFunction& function = test_module_->functions[table.values[j]];
       int sig_id = test_module_->signature_map.Find(function.sig);
-      auto wasm_code = native_module_->code(function.func_index);
-      IndirectFunctionTableEntry(instance, j)
-          .set(sig_id, *instance, wasm_code->instruction_start());
+      auto target =
+          native_module_->GetCallTargetForFunction(function.func_index);
+      IndirectFunctionTableEntry(instance, j).set(sig_id, *instance, target);
     }
   }
 }

View File

@@ -210,15 +210,13 @@ class TestingModuleBuilder {
     return reinterpret_cast<Address>(globals_data_);
   }

   void Link() {
-    if (!linked_) {
-      Handle<WasmModuleObject> module(instance_object()->module_object());
-      CodeSpecialization code_specialization;
-      code_specialization.RelocateDirectCalls(native_module_);
-      code_specialization.ApplyToWholeModule(native_module_, module);
-      linked_ = true;
-      native_module_->SetExecutable(true);
-    }
+    if (linked_) return;
+    CodeSpecialization code_specialization;
+    code_specialization.RelocateDirectCalls(native_module_);
+    code_specialization.ApplyToWholeModule(native_module_);
+    linked_ = true;
+    native_module_->SetExecutable(true);
   }

   ModuleEnv CreateModuleEnv();

View File

@@ -6,6 +6,7 @@
 #include "testing/gmock/include/gmock/gmock.h"

 #include "src/wasm/function-compiler.h"
+#include "src/wasm/jump-table-assembler.h"
 #include "src/wasm/wasm-code-manager.h"

 namespace v8 {

@@ -143,6 +144,10 @@ enum ModuleStyle : int { Fixed = 0, Growable = 1 };
 class WasmCodeManagerTest : public TestWithContext,
                             public ::testing::WithParamInterface<ModuleStyle> {
  public:
+  static constexpr uint32_t kNumFunctions = 10;
+  static constexpr uint32_t kJumpTableSize = RoundUp<kCodeAlignment>(
+      kNumFunctions * JumpTableAssembler::kJumpTableSlotSize);
+
   using NativeModulePtr = std::unique_ptr<NativeModule>;

   NativeModulePtr AllocModule(WasmCodeManager* manager, size_t size,
@@ -150,8 +155,8 @@ class WasmCodeManagerTest : public TestWithContext,
     bool can_request_more = style == Growable;
     wasm::ModuleEnv env(nullptr, UseTrapHandler::kNoTrapHandler,
                         RuntimeExceptionSupport::kNoRuntimeExceptionSupport);
-    return manager->NewNativeModule(i_isolate(), size, 10, 0, can_request_more,
-                                    env);
+    return manager->NewNativeModule(i_isolate(), size, kNumFunctions, 0,
+                                    can_request_more, env);
   }

   WasmCode* AddCode(NativeModule* native_module, uint32_t index, size_t size) {
@@ -175,9 +180,7 @@ TEST_P(WasmCodeManagerTest, EmptyCase) {
   WasmCodeManager manager(0 * page());
   CHECK_EQ(0, manager.remaining_uncommitted_code_space());
-  NativeModulePtr native_module = AllocModule(&manager, 1 * page(), GetParam());
-  CHECK(native_module);
-  ASSERT_DEATH_IF_SUPPORTED(AddCode(native_module.get(), 0, 10),
+  ASSERT_DEATH_IF_SUPPORTED(AllocModule(&manager, 1 * page(), GetParam()),
                             "OOM in NativeModule::AddOwnedCode");
 }
@@ -186,7 +189,7 @@ TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
   CHECK_EQ(1 * page(), manager.remaining_uncommitted_code_space());
   NativeModulePtr native_module = AllocModule(&manager, 1 * page(), GetParam());
   CHECK(native_module);
-  CHECK_EQ(1 * page(), manager.remaining_uncommitted_code_space());
+  CHECK_EQ(0, manager.remaining_uncommitted_code_space());
   uint32_t index = 0;
   WasmCode* code = AddCode(native_module.get(), index++, 1 * kCodeAlignment);
   CHECK_NOT_NULL(code);

@@ -196,7 +199,8 @@ TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
   CHECK_NOT_NULL(code);
   CHECK_EQ(0, manager.remaining_uncommitted_code_space());

-  code = AddCode(native_module.get(), index++, page() - 4 * kCodeAlignment);
+  code = AddCode(native_module.get(), index++,
+                 page() - 4 * kCodeAlignment - kJumpTableSize);
   CHECK_NOT_NULL(code);
   CHECK_EQ(0, manager.remaining_uncommitted_code_space());
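The new "- kJumpTableSize" terms account for NewNativeModule now reserving the jump table inside the module's code space up front. With the constants declared above (kNumFunctions = 10; the 16-byte slot size and 32-byte kCodeAlignment are assumed values for illustration):

// kJumpTableSize = RoundUp<32>(10 * 16) = RoundUp<32>(160) = 160 bytes,
// so a one-page module keeps page() - 160 bytes of budget for test code.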
@@ -206,14 +210,14 @@ TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
 }

 TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
-  WasmCodeManager manager(1 * page());
-  NativeModulePtr nm1 = AllocModule(&manager, 1 * page(), GetParam());
-  NativeModulePtr nm2 = AllocModule(&manager, 1 * page(), GetParam());
+  WasmCodeManager manager(3 * page());
+  NativeModulePtr nm1 = AllocModule(&manager, 2 * page(), GetParam());
+  NativeModulePtr nm2 = AllocModule(&manager, 2 * page(), GetParam());
   CHECK(nm1);
   CHECK(nm2);
-  WasmCode* code = AddCode(nm1.get(), 0, 1 * page());
+  WasmCode* code = AddCode(nm1.get(), 0, 2 * page() - kJumpTableSize);
   CHECK_NOT_NULL(code);
-  ASSERT_DEATH_IF_SUPPORTED(AddCode(nm2.get(), 0, 1 * page()),
+  ASSERT_DEATH_IF_SUPPORTED(AddCode(nm2.get(), 0, 2 * page() - kJumpTableSize),
                             "OOM in NativeModule::AddOwnedCode");
 }
@@ -224,10 +228,10 @@ TEST_P(WasmCodeManagerTest, DifferentHeapsApplyLimitsIndependently) {
   NativeModulePtr nm2 = AllocModule(&manager2, 1 * page(), GetParam());
   CHECK(nm1);
   CHECK(nm2);
-  WasmCode* code = AddCode(nm1.get(), 0, 1 * page());
+  WasmCode* code = AddCode(nm1.get(), 0, 1 * page() - kJumpTableSize);
   CHECK_NOT_NULL(code);
   CHECK_EQ(0, manager1.remaining_uncommitted_code_space());
-  code = AddCode(nm2.get(), 0, 1 * page());
+  code = AddCode(nm2.get(), 0, 1 * page() - kJumpTableSize);
   CHECK_NOT_NULL(code);
 }
@@ -252,7 +256,7 @@ TEST_P(WasmCodeManagerTest, CommitIncrements) {
   code = AddCode(nm.get(), 1, 2 * page());
   CHECK_NOT_NULL(code);
   CHECK_EQ(manager.remaining_uncommitted_code_space(), 7 * page());
-  code = AddCode(nm.get(), 2, page() - kCodeAlignment);
+  code = AddCode(nm.get(), 2, page() - kCodeAlignment - kJumpTableSize);
   CHECK_NOT_NULL(code);
   CHECK_EQ(manager.remaining_uncommitted_code_space(), 7 * page());
 }