[wasm][x64] Improve jump table slot sequence.

R=clemensh@chromium.org

Change-Id: I367bb962d422e570b51c82bc7b3ebbd3fbedfd2b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1570018
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Commit-Queue: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60900}
Author: Michael Starzinger <mstarzinger@chromium.org>
Date:   2019-04-17 11:39:55 +02:00 (committed by Commit Bot)
Parent: 4863551111
Commit: 76f09525ce

2 changed files with 3 additions and 22 deletions

src/wasm/jump-table-assembler.cc

@@ -16,28 +16,9 @@ namespace wasm {
 #if V8_TARGET_ARCH_X64
 void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                  Address lazy_compile_target) {
-  // TODO(clemensh): Try more efficient sequences.
-  // Alternative 1:
-  // [header]:  mov r10, [lazy_compile_target]
-  //            jmp r10
-  // [slot 0]:  push [0]
-  //            jmp [header]  // pc-relative --> slot size: 10 bytes
-  //
-  // Alternative 2:
-  // [header]:  lea r10, [rip - [header]]
-  //            shr r10, 3  // compute index from offset
-  //            push r10
-  //            mov r10, [lazy_compile_target]
-  //            jmp r10
-  // [slot 0]:  call [header]
-  //            ret  // -> slot size: 5 bytes
-
-  // Use a push, because mov to an extended register takes 6 bytes.
-  pushq(Immediate(func_index));                            // max 5 bytes
-  movq(kScratchRegister, uint64_t{lazy_compile_target});   // max 10 bytes
-  jmp(kScratchRegister);                                   // 3 bytes
-  PatchConstPool();  // force patching entries for partial const pool
+  pushq(Immediate(func_index));       // max 5 bytes
+  EmitJumpSlot(lazy_compile_target);  // always 5 bytes
 }

 void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
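Editor's note: the byte counts in the slot comments can be checked against the standard x86-64 encodings (Intel SDM). The sketch below is an illustration, not part of the commit: it lays out both sequences as raw bytes, with immediates and displacements zeroed as placeholders. The old slot was push imm32 (5 bytes) + movabs to kScratchRegister, which is r10 on x64 (10 bytes) + indirect jmp r10 (3 bytes) = 18 bytes; the new slot emitted via EmitJumpSlot is push imm32 (5 bytes) + pc-relative jmp rel32 (5 bytes) = 10 bytes.

// Standalone sketch (not V8 code) of the 18 -> 10 byte arithmetic.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // Old sequence: push imm32; movabs r10, imm64; jmp r10.
  std::vector<uint8_t> old_slot = {
      0x68, 0, 0, 0, 0,                    // push imm32          (5 bytes)
      0x49, 0xBA, 0, 0, 0, 0, 0, 0, 0, 0,  // movabs r10, imm64   (10 bytes)
      0x41, 0xFF, 0xE2,                    // jmp r10             (3 bytes)
  };
  // New sequence: push imm32; jmp rel32 (pc-relative near jump).
  std::vector<uint8_t> new_slot = {
      0x68, 0, 0, 0, 0,                    // push imm32          (5 bytes)
      0xE9, 0, 0, 0, 0,                    // jmp rel32           (5 bytes)
  };
  std::printf("old slot: %zu bytes\n", old_slot.size());  // prints 18
  std::printf("new slot: %zu bytes\n", new_slot.size());  // prints 10
  return 0;
}

These totals match the kJumpTableSlotSize change from 18 to 10 in the header diff below; the 18-byte sequence lives on as kJumpTableStubSlotSize.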

src/wasm/jump-table-assembler.h

@@ -115,7 +115,7 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
   // boundaries. The jump table line size has been chosen to satisfy this.
 #if V8_TARGET_ARCH_X64
   static constexpr int kJumpTableLineSize = 64;
-  static constexpr int kJumpTableSlotSize = 18;
+  static constexpr int kJumpTableSlotSize = 10;
   static constexpr int kJumpTableStubSlotSize = 18;
 #elif V8_TARGET_ARCH_IA32
   static constexpr int kJumpTableLineSize = 64;
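
Editor's note: per the context comment above, slots must not cross jump table line boundaries, so a smaller slot also packs more slots per 64-byte line. A quick sketch of that arithmetic (assuming whole-slot packing per line; kOldSlotSize/kNewSlotSize are illustrative names, only kJumpTableLineSize comes from the header):

#include <cstdio>

constexpr int kJumpTableLineSize = 64;  // from jump-table-assembler.h
constexpr int kOldSlotSize = 18;        // slot size before this commit
constexpr int kNewSlotSize = 10;        // slot size after this commit

int main() {
  // Whole slots per line: the smaller slot doubles per-line density.
  std::printf("slots per 64-byte line: old=%d new=%d\n",
              kJumpTableLineSize / kOldSlotSize,   // prints 3
              kJumpTableLineSize / kNewSlotSize);  // prints 6
  return 0;
}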