[ptr-compr][x64] Support pointer decompressing by addressing mode,

pt.1

Compiler generates the below instructions for compressed pointer:

  [ DecompressTaggedPointer
  movl r8,[r8+0x13]
  REX.W addq r8,r14
  ]
  addl [r8+0x7],0xe6

This CL optimizes the pointer decompression by using complex
addressing mode in x64:

  movl r8,[r8+0x13]
  addl [r14+r8*1+0x7],0xe6

Bug: v8:13056, v8:7703
Change-Id: I755cdac407bab4ff2e78d4a6a164f13385f7c361
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3765067
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Hao A Xu <hao.a.xu@intel.com>
Cr-Commit-Position: refs/heads/main@{#81967}
This commit is contained in:
Hao Xu 2022-07-27 02:05:01 +08:00 committed by V8 LUCI CQ
parent 9ea588d52b
commit 6848a341ee
16 changed files with 748 additions and 74 deletions

View File

@ -8,6 +8,7 @@
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/arm/assembler-arm-inl.h"
#include "src/codegen/interface-descriptors.h"
#include "src/objects/literal-objects-inl.h"
namespace v8 {
namespace internal {
@ -474,6 +475,61 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
if (skip_interrupt_label) __ b(ge, skip_interrupt_label);
}
// Emits code to load the context slot at |index|, found |depth| hops up the
// context chain starting at |context|, into the interpreter accumulator.
// Clobbers |context|.
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
// Walk up the context chain, one Context::kPreviousOffset link per level.
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
// Emits code to store |value| into the context slot at |index|, found |depth|
// hops up the context chain starting at |context|. Clobbers |context|.
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
}
// Emits code to load the module variable at |cell_index| (positive:
// regular export, otherwise: regular import) of the module whose context is
// |depth| hops up the chain, into the interpreter accumulator.
// Clobbers |context|.
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
// The module context's extension slot holds the SourceTextModule.
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
}
// Emits code to store |value| into the module export cell at |cell_index| of
// the module whose context is |depth| hops up the chain. Only the regular
// exports array is consulted, so |cell_index| is expected to be positive.
// Clobbers |context|.
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
LoadFixedArrayElement(context, context, cell_index);
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
// Emits code to add the Smi constant |rhs| to |lhs| in place.
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
__ add(lhs, lhs, Operand(rhs));
}

View File

@ -8,6 +8,7 @@
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
#include "src/codegen/interface-descriptors.h"
#include "src/objects/literal-objects-inl.h"
namespace v8 {
namespace internal {
@ -544,6 +545,61 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
if (skip_interrupt_label) __ B(ge, skip_interrupt_label);
}
// Emits code to load the context slot at |index|, found |depth| hops up the
// context chain starting at |context|, into the interpreter accumulator.
// Clobbers |context|.
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
// Walk up the context chain, one Context::kPreviousOffset link per level.
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
// Emits code to store |value| into the context slot at |index|, found |depth|
// hops up the context chain starting at |context|. Clobbers |context|.
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
}
// Emits code to load the module variable at |cell_index| (positive:
// regular export, otherwise: regular import) of the module whose context is
// |depth| hops up the chain, into the interpreter accumulator.
// Clobbers |context|.
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
// The module context's extension slot holds the SourceTextModule.
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
}
// Emits code to store |value| into the module export cell at |cell_index| of
// the module whose context is |depth| hops up the chain. Only the regular
// exports array is consulted, so |cell_index| is expected to be positive.
// Clobbers |context|.
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
LoadFixedArrayElement(context, context, cell_index);
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
if (SmiValuesAre31Bits()) {
__ Add(lhs.W(), lhs.W(), Immediate(rhs));

View File

@ -166,6 +166,26 @@ class BaselineAssembler {
int32_t index);
inline void LoadPrototype(Register prototype, Register object);
// Loads compressed pointer or loads from compressed pointer. This is because
// X64 supports complex addressing mode, pointer decompression can be done by
// [%compressed_base + %r1 + K].
#if V8_TARGET_ARCH_X64
inline void LoadTaggedPointerField(TaggedRegister output, Register source,
int offset);
inline void LoadTaggedPointerField(TaggedRegister output,
TaggedRegister source, int offset);
inline void LoadTaggedPointerField(Register output, TaggedRegister source,
int offset);
inline void LoadTaggedAnyField(Register output, TaggedRegister source,
int offset);
inline void LoadTaggedAnyField(TaggedRegister output, TaggedRegister source,
int offset);
inline void LoadFixedArrayElement(Register output, TaggedRegister array,
int32_t index);
inline void LoadFixedArrayElement(TaggedRegister output, TaggedRegister array,
int32_t index);
#endif
// Falls through and sets scratch_and_result to 0 on failure, jumps to
// on_result on success.
inline void TryLoadOptimizedOsrCode(Register scratch_and_result,
@ -180,6 +200,14 @@ class BaselineAssembler {
inline void AddToInterruptBudgetAndJumpIfNotExceeded(
Register weight, Label* skip_interrupt_label);
inline void LdaContextSlot(Register context, uint32_t index, uint32_t depth);
inline void StaContextSlot(Register context, Register value, uint32_t index,
uint32_t depth);
inline void LdaModuleVariable(Register context, int cell_index,
uint32_t depth);
inline void StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth);
inline void AddSmi(Register lhs, Smi rhs);
inline void SmiUntag(Register value);
inline void SmiUntag(Register output, Register value);

View File

@ -727,12 +727,9 @@ void BaselineCompiler::VisitLdaContextSlot() {
BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
Register context = scratch_scope.AcquireScratch();
LoadRegister(context, 0);
int depth = Uint(2);
for (; depth > 0; --depth) {
__ LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
__ LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(Index(1)));
uint32_t index = Index(1);
uint32_t depth = Uint(2);
__ LdaContextSlot(context, index, depth);
}
void BaselineCompiler::VisitLdaImmutableContextSlot() { VisitLdaContextSlot(); }
@ -755,13 +752,9 @@ void BaselineCompiler::VisitStaContextSlot() {
DCHECK(!AreAliased(value, context, kInterpreterAccumulatorRegister));
__ Move(value, kInterpreterAccumulatorRegister);
LoadRegister(context, 0);
int depth = Uint(2);
for (; depth > 0; --depth) {
__ LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
__ StoreTaggedFieldWithWriteBarrier(
context, Context::OffsetOfElementAt(iterator().GetIndexOperand(1)),
value);
uint32_t index = Index(1);
uint32_t depth = Uint(2);
__ StaContextSlot(context, value, index, depth);
}
void BaselineCompiler::VisitStaCurrentContextSlot() {
@ -871,26 +864,9 @@ void BaselineCompiler::VisitLdaModuleVariable() {
BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
Register scratch = scratch_scope.AcquireScratch();
__ LoadContext(scratch);
int depth = Uint(1);
for (; depth > 0; --depth) {
__ LoadTaggedPointerField(scratch, scratch, Context::kPreviousOffset);
}
__ LoadTaggedPointerField(scratch, scratch, Context::kExtensionOffset);
int cell_index = Int(0);
if (cell_index > 0) {
__ LoadTaggedPointerField(scratch, scratch,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
__ LoadTaggedPointerField(scratch, scratch,
SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
__ LoadFixedArrayElement(scratch, scratch, cell_index);
__ LoadTaggedAnyField(kInterpreterAccumulatorRegister, scratch,
Cell::kValueOffset);
int depth = Uint(1);
__ LdaModuleVariable(scratch, cell_index, depth);
}
void BaselineCompiler::VisitStaModuleVariable() {
@ -908,17 +884,7 @@ void BaselineCompiler::VisitStaModuleVariable() {
__ Move(value, kInterpreterAccumulatorRegister);
__ LoadContext(scratch);
int depth = Uint(1);
for (; depth > 0; --depth) {
__ LoadTaggedPointerField(scratch, scratch, Context::kPreviousOffset);
}
__ LoadTaggedPointerField(scratch, scratch, Context::kExtensionOffset);
__ LoadTaggedPointerField(scratch, scratch,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
__ LoadFixedArrayElement(scratch, scratch, cell_index);
__ StoreTaggedFieldWithWriteBarrier(scratch, Cell::kValueOffset, value);
__ StaModuleVariable(scratch, value, cell_index, depth);
}
void BaselineCompiler::VisitSetNamedProperty() {

View File

@ -9,6 +9,7 @@
#include "src/codegen/ia32/register-ia32.h"
#include "src/codegen/interface-descriptors.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/literal-objects-inl.h"
namespace v8 {
namespace internal {
@ -434,6 +435,61 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
if (skip_interrupt_label) __ j(greater_equal, skip_interrupt_label);
}
// Emits code to load the context slot at |index|, found |depth| hops up the
// context chain starting at |context|, into the interpreter accumulator.
// Clobbers |context|.
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
// Walk up the context chain, one Context::kPreviousOffset link per level.
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
// Emits code to store |value| into the context slot at |index|, found |depth|
// hops up the context chain starting at |context|. Clobbers |context|.
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
}
// Emits code to load the module variable at |cell_index| (positive:
// regular export, otherwise: regular import) of the module whose context is
// |depth| hops up the chain, into the interpreter accumulator.
// Clobbers |context|.
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
// The module context's extension slot holds the SourceTextModule.
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
}
// Emits code to store |value| into the module export cell at |cell_index| of
// the module whose context is |depth| hops up the chain. Only the regular
// exports array is consulted, so |cell_index| is expected to be positive.
// Clobbers |context|.
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
LoadFixedArrayElement(context, context, cell_index);
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
if (rhs.value() == 0) return;
__ add(lhs, Immediate(rhs));

View File

@ -8,6 +8,7 @@
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/loong64/assembler-loong64-inl.h"
#include "src/objects/literal-objects-inl.h"
namespace v8 {
namespace internal {
@ -442,6 +443,61 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
__ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
}
// Emits code to load the context slot at |index|, found |depth| hops up the
// context chain starting at |context|, into the interpreter accumulator.
// Clobbers |context|.
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
// Walk up the context chain, one Context::kPreviousOffset link per level.
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
// Emits code to store |value| into the context slot at |index|, found |depth|
// hops up the context chain starting at |context|. Clobbers |context|.
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
}
// Emits code to load the module variable at |cell_index| (positive:
// regular export, otherwise: regular import) of the module whose context is
// |depth| hops up the chain, into the interpreter accumulator.
// Clobbers |context|.
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
// The module context's extension slot holds the SourceTextModule.
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
}
// Emits code to store |value| into the module export cell at |cell_index| of
// the module whose context is |depth| hops up the chain. Only the regular
// exports array is consulted, so |cell_index| is expected to be positive.
// Clobbers |context|.
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
LoadFixedArrayElement(context, context, cell_index);
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
// Emits code to add the Smi constant |rhs| to |lhs| in place.
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
__ Add_d(lhs, lhs, Operand(rhs));
}

View File

@ -8,6 +8,7 @@
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/mips/assembler-mips-inl.h"
#include "src/objects/literal-objects-inl.h"
namespace v8 {
namespace internal {
@ -454,6 +455,24 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
__ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
}
// Emits code to load the context slot at |index|, found |depth| hops up the
// context chain starting at |context|, into the interpreter accumulator.
// Clobbers |context|.
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
// Walk up the context chain, one Context::kPreviousOffset link per level.
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
// Emits code to store |value| into the context slot at |index|, found |depth|
// hops up the context chain starting at |context|. Clobbers |context|.
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
}
// Emits code to add the Smi constant |rhs| to |lhs| in place.
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
__ Addu(lhs, lhs, Operand(rhs));
}

View File

@ -8,6 +8,7 @@
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/mips64/assembler-mips64-inl.h"
#include "src/objects/literal-objects-inl.h"
namespace v8 {
namespace internal {
@ -452,6 +453,61 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
__ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
}
// Emits code to load the context slot at |index|, found |depth| hops up the
// context chain starting at |context|, into the interpreter accumulator.
// Clobbers |context|.
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
// Walk up the context chain, one Context::kPreviousOffset link per level.
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
// Emits code to store |value| into the context slot at |index|, found |depth|
// hops up the context chain starting at |context|. Clobbers |context|.
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
}
// Emits code to load the module variable at |cell_index| (positive:
// regular export, otherwise: regular import) of the module whose context is
// |depth| hops up the chain, into the interpreter accumulator.
// Clobbers |context|.
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
// The module context's extension slot holds the SourceTextModule.
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
}
// Emits code to store |value| into the module export cell at |cell_index| of
// the module whose context is |depth| hops up the chain. Only the regular
// exports array is consulted, so |cell_index| is expected to be positive.
// Clobbers |context|.
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
LoadFixedArrayElement(context, context, cell_index);
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
// Emits code to add the Smi constant |rhs| to |lhs| in place.
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
__ Daddu(lhs, lhs, Operand(rhs));
}

View File

@ -6,8 +6,9 @@
#define V8_BASELINE_PPC_BASELINE_ASSEMBLER_PPC_INL_H_
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/ppc/assembler-ppc-inl.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/ppc/assembler-ppc-inl.h"
#include "src/objects/literal-objects-inl.h"
namespace v8 {
namespace internal {
@ -614,6 +615,61 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
if (skip_interrupt_label) __ bge(skip_interrupt_label, cr0);
}
// Emits code to load the context slot at |index|, found |depth| hops up the
// context chain starting at |context|, into the interpreter accumulator.
// Clobbers |context|.
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
// Walk up the context chain, one Context::kPreviousOffset link per level.
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
// Emits code to store |value| into the context slot at |index|, found |depth|
// hops up the context chain starting at |context|. Clobbers |context|.
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
}
// Emits code to load the module variable at |cell_index| (positive:
// regular export, otherwise: regular import) of the module whose context is
// |depth| hops up the chain, into the interpreter accumulator.
// Clobbers |context|.
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
// The module context's extension slot holds the SourceTextModule.
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
}
// Emits code to store |value| into the module export cell at |cell_index| of
// the module whose context is |depth| hops up the chain. Only the regular
// exports array is consulted, so |cell_index| is expected to be positive.
// Clobbers |context|.
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
LoadFixedArrayElement(context, context, cell_index);
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
if (rhs.value() == 0) return;
__ LoadSmiLiteral(r0, rhs);

View File

@ -8,6 +8,7 @@
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/interface-descriptors.h"
#include "src/objects/literal-objects-inl.h"
namespace v8 {
namespace internal {
namespace baseline {
@ -449,6 +450,61 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
__ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
}
// Emits code to load the context slot at |index|, found |depth| hops up the
// context chain starting at |context|, into the interpreter accumulator.
// Clobbers |context|.
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
// Walk up the context chain, one Context::kPreviousOffset link per level.
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
// Emits code to store |value| into the context slot at |index|, found |depth|
// hops up the context chain starting at |context|. Clobbers |context|.
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
}
// Emits code to load the module variable at |cell_index| (positive:
// regular export, otherwise: regular import) of the module whose context is
// |depth| hops up the chain, into the interpreter accumulator.
// Clobbers |context|.
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
// The module context's extension slot holds the SourceTextModule.
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
}
// Emits code to store |value| into the module export cell at |cell_index| of
// the module whose context is |depth| hops up the chain. Only the regular
// exports array is consulted, so |cell_index| is expected to be positive.
// Clobbers |context|.
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
LoadFixedArrayElement(context, context, cell_index);
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
ASM_CODE_COMMENT(masm_);
if (SmiValuesAre31Bits()) {

View File

@ -6,8 +6,9 @@
#define V8_BASELINE_S390_BASELINE_ASSEMBLER_S390_INL_H_
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/s390/assembler-s390-inl.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/s390/assembler-s390-inl.h"
#include "src/objects/literal-objects-inl.h"
namespace v8 {
namespace internal {
@ -612,6 +613,61 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
if (skip_interrupt_label) __ b(ge, skip_interrupt_label);
}
// Emits code to load the context slot at |index|, found |depth| hops up the
// context chain starting at |context|, into the interpreter accumulator.
// Clobbers |context|.
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
// Walk up the context chain, one Context::kPreviousOffset link per level.
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
// Emits code to store |value| into the context slot at |index|, found |depth|
// hops up the context chain starting at |context|. Clobbers |context|.
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
}
// Emits code to load the module variable at |cell_index| (positive:
// regular export, otherwise: regular import) of the module whose context is
// |depth| hops up the chain, into the interpreter accumulator.
// Clobbers |context|.
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
// The module context's extension slot holds the SourceTextModule.
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
}
// Emits code to store |value| into the module export cell at |cell_index| of
// the module whose context is |depth| hops up the chain. Only the regular
// exports array is consulted, so |cell_index| is expected to be positive.
// Clobbers |context|.
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
LoadFixedArrayElement(context, context, cell_index);
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
if (rhs.value() == 0) return;
__ LoadSmiLiteral(r0, rhs);

View File

@ -9,6 +9,7 @@
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/x64/register-x64.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/literal-objects-inl.h"
namespace v8 {
namespace internal {
@ -373,6 +374,47 @@ void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
__ StoreTaggedField(FieldOperand(target, offset), value);
}
// x64-only overloads taking TaggedRegister. A TaggedRegister holds a tagged
// value that may still be in compressed form; decompression is deferred so
// that the use site can fold it into a complex addressing mode
// ([%compressed_base + %reg + disp]) instead of a separate add.
//
// Loads the tagged pointer field at |source| + |offset| into |output|,
// keeping it compressed when pointer compression is enabled.
void BaselineAssembler::LoadTaggedPointerField(TaggedRegister output,
Register source, int offset) {
__ LoadTaggedPointerField(output, FieldOperand(source, offset));
}
// Same as above, but |source| itself still holds a compressed pointer.
void BaselineAssembler::LoadTaggedPointerField(TaggedRegister output,
TaggedRegister source,
int offset) {
__ LoadTaggedPointerField(output, FieldOperand(source, offset));
}
// Loads through a compressed |source| into a plain Register |output|
// (full-width at the use sites below).
void BaselineAssembler::LoadTaggedPointerField(Register output,
TaggedRegister source,
int offset) {
__ LoadTaggedPointerField(output, FieldOperand(source, offset));
}
// Loads any tagged value (pointer or Smi) through a compressed |source|.
void BaselineAssembler::LoadTaggedAnyField(Register output,
TaggedRegister source, int offset) {
__ LoadAnyTaggedField(output, FieldOperand(source, offset));
}
// Loads any tagged value through a compressed |source|, keeping |output|
// in compressed form.
void BaselineAssembler::LoadTaggedAnyField(TaggedRegister output,
TaggedRegister source, int offset) {
__ LoadAnyTaggedField(output, FieldOperand(source, offset));
}
// Loads FixedArray element |index| through a compressed |array| pointer.
void BaselineAssembler::LoadFixedArrayElement(Register output,
TaggedRegister array,
int32_t index) {
LoadTaggedAnyField(output, array,
FixedArray::kHeaderSize + index * kTaggedSize);
}
// Loads FixedArray element |index|, keeping |output| in compressed form.
void BaselineAssembler::LoadFixedArrayElement(TaggedRegister output,
TaggedRegister array,
int32_t index) {
LoadTaggedAnyField(output, array,
FixedArray::kHeaderSize + index * kTaggedSize);
}
void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
Register feedback_vector,
FeedbackSlot slot,
@ -404,9 +446,11 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
// Decompresses pointer by complex addressing mode when necessary.
TaggedRegister tagged(feedback_cell);
LoadTaggedPointerField(tagged, feedback_cell,
JSFunction::kFeedbackCellOffset);
__ addl(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
__ addl(FieldOperand(tagged, FeedbackCell::kInterruptBudgetOffset),
Immediate(weight));
if (skip_interrupt_label) {
DCHECK_LT(weight, 0);
@ -420,13 +464,114 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
// Decompresses pointer by complex addressing mode when necessary.
TaggedRegister tagged(feedback_cell);
LoadTaggedPointerField(tagged, feedback_cell,
JSFunction::kFeedbackCellOffset);
__ addl(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
weight);
__ addl(FieldOperand(tagged, FeedbackCell::kInterruptBudgetOffset), weight);
if (skip_interrupt_label) __ j(greater_equal, skip_interrupt_label);
}
// Emits code to load the context slot at |index|, found |depth| hops up the
// context chain starting at |context|, into the interpreter accumulator.
// Clobbers |context|.
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
// [context] is coming from interpreter frame so it is already decompressed
// when pointer compression is enabled. In order to make use of complex
// addressing mode, any intermediate context pointer is loaded in compressed
// form.
if (depth == 0) {
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
} else {
// Keep the walked context pointers compressed; each subsequent load
// decompresses via [cage_base + reg + disp] addressing.
TaggedRegister tagged(context);
LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
--depth;
for (; depth > 0; --depth) {
LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, tagged,
Context::OffsetOfElementAt(index));
}
}
// Emits code to store |value| into the context slot at |index|, found |depth|
// hops up the context chain starting at |context|. Clobbers |context|.
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
// [context] is coming from interpreter frame so it is already decompressed
// when pointer compression is enabled. In order to make use of complex
// addressing mode, any intermediate context pointer is loaded in compressed
// form.
if (depth > 0) {
TaggedRegister tagged(context);
LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
--depth;
for (; depth > 0; --depth) {
LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
}
if (COMPRESS_POINTERS_BOOL) {
// Decompress tagged pointer.
// The store below (with write barrier) needs the full pointer, so add
// the cage base back in. |tagged.reg()| aliases |context|.
__ addq(tagged.reg(), kPtrComprCageBaseRegister);
}
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
}
// Emits code to load the module variable at |cell_index| (positive:
// regular export, otherwise: regular import) of the module whose context is
// |depth| hops up the chain, into the interpreter accumulator.
// Clobbers |context|.
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
// [context] is coming from interpreter frame so it is already decompressed.
// In order to make use of complex addressing mode when pointer compression is
// enabled, any intermediate context pointer is loaded in compressed form.
TaggedRegister tagged(context);
if (depth == 0) {
LoadTaggedPointerField(tagged, context, Context::kExtensionOffset);
} else {
LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
--depth;
for (; depth > 0; --depth) {
LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
}
// The module context's extension slot holds the SourceTextModule.
LoadTaggedPointerField(tagged, tagged, Context::kExtensionOffset);
}
if (cell_index > 0) {
LoadTaggedPointerField(tagged, tagged,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(tagged, tagged,
SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(tagged, tagged, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, tagged,
Cell::kValueOffset);
}
// Emits code to store |value| into the module export cell at |cell_index| of
// the module whose context is |depth| hops up the chain. Only the regular
// exports array is consulted, so |cell_index| is expected to be positive.
// Clobbers |context|.
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
// [context] is coming from interpreter frame so it is already decompressed.
// In order to make use of complex addressing mode when pointer compression is
// enabled, any intermediate context pointer is loaded in compressed form.
TaggedRegister tagged(context);
if (depth == 0) {
LoadTaggedPointerField(tagged, context, Context::kExtensionOffset);
} else {
LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
--depth;
for (; depth > 0; --depth) {
LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
}
LoadTaggedPointerField(tagged, tagged, Context::kExtensionOffset);
}
LoadTaggedPointerField(tagged, tagged,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
// Load the Cell into the plain |context| register (full-width), since the
// write-barrier store below needs a decompressed pointer.
LoadFixedArrayElement(context, tagged, cell_index);
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
if (rhs.value() == 0) return;
if (SmiValuesAre31Bits()) {

View File

@ -203,9 +203,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[4*kSystemPointerSize]: context
// -----------------------------------
const TaggedRegister shared_function_info(rbx);
__ LoadTaggedPointerField(
rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movl(rbx, FieldOperand(rbx, SharedFunctionInfo::kFlagsOffset));
shared_function_info,
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movl(rbx,
FieldOperand(shared_function_info, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(rbx);
__ JumpIfIsInRange(
rbx, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
@ -1171,12 +1174,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
const TaggedRegister shared_function_info(kScratchRegister);
__ LoadTaggedPointerField(
kScratchRegister,
shared_function_info,
FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
kInterpreterBytecodeArrayRegister,
FieldOperand(kScratchRegister, SharedFunctionInfo::kFunctionDataOffset));
FieldOperand(shared_function_info,
SharedFunctionInfo::kFunctionDataOffset));
Label is_baseline;
GetSharedFunctionInfoBytecodeOrBaseline(
@ -1190,10 +1195,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ j(not_equal, &compile_lazy);
// Load the feedback vector from the closure.
TaggedRegister feedback_cell(feedback_vector);
__ LoadTaggedPointerField(
feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(feedback_vector,
FieldOperand(feedback_vector, Cell::kValueOffset));
FieldOperand(feedback_cell, Cell::kValueOffset));
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
@ -1356,11 +1362,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&is_baseline);
{
// Load the feedback vector from the closure.
TaggedRegister feedback_cell(feedback_vector);
__ LoadTaggedPointerField(
feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(
feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(feedback_vector,
FieldOperand(feedback_cell, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
@ -1554,10 +1560,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
__ movq(rbx, Operand(rbp, StandardFrameConstants::kFunctionOffset));
const TaggedRegister shared_function_info(rbx);
__ LoadTaggedPointerField(
rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
shared_function_info,
FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
rbx, FieldOperand(rbx, SharedFunctionInfo::kFunctionDataOffset));
rbx, FieldOperand(shared_function_info,
SharedFunctionInfo::kFunctionDataOffset));
__ CmpObjectType(rbx, INTERPRETER_DATA_TYPE, kScratchRegister);
__ j(not_equal, &builtin_trampoline, Label::kNear);
@ -1688,10 +1697,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
Register closure = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kClosure);
// Load the feedback vector from the closure.
TaggedRegister feedback_cell(feedback_vector);
__ LoadTaggedPointerField(
feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(feedback_vector,
FieldOperand(feedback_vector, Cell::kValueOffset));
FieldOperand(feedback_cell, Cell::kValueOffset));
if (FLAG_debug_code) {
__ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, kScratchRegister);
__ Assert(equal, AbortReason::kExpectedFeedbackVector);
@ -2607,9 +2617,11 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
__ LoadRoot(rbx, RootIndex::kUndefinedValue);
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
const TaggedRegister shared_function_info(rcx);
__ LoadTaggedPointerField(
rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testl(FieldOperand(rcx, SharedFunctionInfo::kFlagsOffset),
shared_function_info,
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testl(FieldOperand(shared_function_info, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
__ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
RelocInfo::CODE_TARGET, not_zero);
@ -2777,13 +2789,16 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
}
// Load deoptimization data from the code object.
const TaggedRegister deopt_data(rbx);
__ LoadTaggedPointerField(
rbx, FieldOperand(rax, Code::kDeoptimizationDataOrInterpreterDataOffset));
deopt_data,
FieldOperand(rax, Code::kDeoptimizationDataOrInterpreterDataOffset));
// Load the OSR entrypoint offset from the deoptimization data.
__ SmiUntagField(
rbx, FieldOperand(rbx, FixedArray::OffsetOfElementAt(
DeoptimizationData::kOsrPcOffsetIndex)));
rbx,
FieldOperand(deopt_data, FixedArray::OffsetOfElementAt(
DeoptimizationData::kOsrPcOffsetIndex)));
// Compute the target address = code_obj + header_size + osr_offset
__ leaq(rax, FieldOperand(rax, rbx, times_1, Code::kHeaderSize));
@ -5114,11 +5129,13 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Get the Code object from the shared function info.
Register code_obj = rbx;
TaggedRegister shared_function_info(code_obj);
__ LoadTaggedPointerField(
code_obj, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
shared_function_info,
FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
code_obj,
FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
code_obj, FieldOperand(shared_function_info,
SharedFunctionInfo::kFunctionDataOffset));
// Check if we have baseline code. For OSR entry it is safe to assume we
// always have baseline code.
@ -5150,10 +5167,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Load the feedback vector.
Register feedback_vector = r11;
TaggedRegister feedback_cell(feedback_vector);
__ LoadTaggedPointerField(
feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(feedback_vector,
FieldOperand(feedback_vector, Cell::kValueOffset));
FieldOperand(feedback_cell, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to

View File

@ -214,6 +214,15 @@ void TurboAssembler::LoadTaggedPointerField(Register destination,
}
}
void TurboAssembler::LoadTaggedPointerField(TaggedRegister destination,
                                            Operand field_operand) {
  // Loads the field without decompressing it: under pointer compression the
  // destination ends up holding the raw 32-bit compressed value.
  if (!COMPRESS_POINTERS_BOOL) {
    mov_tagged(destination.reg(), field_operand);
  } else {
    movl(destination.reg(), field_operand);
  }
}
#ifdef V8_MAP_PACKING
void TurboAssembler::UnpackMapWord(Register r) {
// Clear the top two bytes (which may include metadata). Must be in sync with
@ -242,6 +251,15 @@ void TurboAssembler::LoadAnyTaggedField(Register destination,
}
}
void TurboAssembler::LoadAnyTaggedField(TaggedRegister destination,
                                        Operand field_operand) {
  // Loads any tagged field without decompressing it: under pointer
  // compression the destination holds the raw 32-bit compressed value.
  if (!COMPRESS_POINTERS_BOOL) {
    mov_tagged(destination.reg(), field_operand);
  } else {
    movl(destination.reg(), field_operand);
  }
}
void TurboAssembler::PushTaggedPointerField(Operand field_operand,
Register scratch) {
if (COMPRESS_POINTERS_BOOL) {

View File

@ -572,6 +572,11 @@ class V8_EXPORT_PRIVATE TurboAssembler
// compression is enabled.
void LoadTaggedPointerField(Register destination, Operand field_operand);
// Loads a field containing a HeapObject but does not decompress it when
// pointer compression is enabled.
void LoadTaggedPointerField(TaggedRegister destination,
Operand field_operand);
// Loads a field containing a Smi and decompresses it if pointer compression
// is enabled.
void LoadTaggedSignedField(Register destination, Operand field_operand);
@ -579,6 +584,10 @@ class V8_EXPORT_PRIVATE TurboAssembler
// Loads a field containing any tagged value and decompresses it if necessary.
void LoadAnyTaggedField(Register destination, Operand field_operand);
// Loads a field containing any tagged value but does not decompress it when
// pointer compression is enabled.
void LoadAnyTaggedField(TaggedRegister destination, Operand field_operand);
// Loads a field containing a HeapObject, decompresses it if necessary and
// pushes full pointer to the stack. When pointer compression is enabled,
// uses |scratch| to decompress the value.
@ -936,6 +945,17 @@ inline Operand FieldOperand(Register object, int offset) {
return Operand(object, offset - kHeapObjectTag);
}
// Generate an Operand for loading a field from an object. Object pointer is a
// compressed pointer when pointer compression is enabled.
// Generate an Operand for loading a field from an object. Object pointer is a
// compressed pointer when pointer compression is enabled, so the decompression
// (adding the cage base) is folded into a base+index addressing mode.
inline Operand FieldOperand(TaggedRegister object, int offset) {
  if (!COMPRESS_POINTERS_BOOL) {
    return Operand(object.reg(), offset - kHeapObjectTag);
  }
  return Operand(kPtrComprCageBaseRegister, object.reg(),
                 ScaleFactor::times_1, offset - kHeapObjectTag);
}
// Generate an Operand for loading an indexed field from an object.
inline Operand FieldOperand(Register object, Register index, ScaleFactor scale,
int offset) {

View File

@ -73,6 +73,17 @@ class Register : public RegisterBase<Register, kRegAfterLast> {
explicit constexpr Register(int code) : RegisterBase(code) {}
};
// Register that stores a tagged value. The tagged value is in compressed form
// when pointer compression is enabled.
class TaggedRegister {
 public:
  explicit TaggedRegister(Register reg) : reg_(reg) {}
  // Const accessor: reading the wrapped register never mutates the wrapper,
  // and a const TaggedRegister must still be usable as an operand source.
  Register reg() const { return reg_; }

 private:
  Register reg_;
};
ASSERT_TRIVIALLY_COPYABLE(Register);
static_assert(sizeof(Register) <= sizeof(int),
"Register can efficiently be passed by value");