[ptrcomp] Remove the distinction of TaggedAny and TaggedPointer
Known-pointer decompression used to be distinct from any-tagged-value
decompression, since the latter used to detect Smis and decompress them
with sign extension. However, we got rid of this distinction when we
introduced Smi-corrupting loads (allowing the top 32 bits of uncompressed
Smis to be undefined), which means that the TaggedPointer and TaggedAny
decompression is now identical. We can remove a bunch of duplicate code
by removing this distinction.

Change-Id: Id66671497d63ed885f9e537494c011317dfd4788
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4221398
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Toon Verwaest <verwaest@chromium.org>
Auto-Submit: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#85647}
Parent: 1c162c83b1
Commit: 81aa89592b
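The rename is mechanical once the observation in the message holds: with Smi-corrupting loads, a compressed Smi no longer needs a Smi check and sign extension, so it is decompressed exactly like a compressed heap pointer, by zero-extending the 32-bit field and adding the cage base. A minimal C++ sketch of the surviving helper, modelled on the Internals hunk below (the standalone types and the 4GB cage alignment are illustrative assumptions, not V8's exact headers):

#include <cstdint>

using Address = uintptr_t;
// Assumption for this sketch: the pointer-compression cage is 4GB-aligned.
constexpr Address kPtrComprCageBaseAlignment = Address{1} << 32;

// Any on-heap address shares the cage base, so masking recovers it.
Address GetPtrComprCageBaseFromOnHeapAddress(Address addr) {
  return addr & -static_cast<intptr_t>(kPtrComprCageBaseAlignment);
}

// Previously the any-tagged path first tested for a Smi tag and
// sign-extended; the known-pointer path was already this plain add.
// With Smi-corrupting loads both collapse to one zero-extending add,
// leaving the upper 32 bits of an uncompressed Smi arbitrary.
Address DecompressTaggedField(Address heap_object_ptr, uint32_t value) {
  Address base = GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
  return base + value;
}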
@@ -365,8 +365,7 @@ Local<Value> Context::GetEmbedderData(int index) {
 #ifdef V8_COMPRESS_POINTERS
   // We read the full pointer value and then decompress it in order to avoid
   // dealing with potential endiannes issues.
-  value =
-      I::DecompressTaggedAnyField(embedder_data, static_cast<uint32_t>(value));
+  value = I::DecompressTaggedField(embedder_data, static_cast<uint32_t>(value));
 #endif
   internal::Isolate* isolate = internal::IsolateFromNeverReadOnlySpaceObject(
       *reinterpret_cast<A*>(this));
@@ -807,7 +807,7 @@ class Internals {
     return addr & -static_cast<intptr_t>(kPtrComprCageBaseAlignment);
   }

-  V8_INLINE static internal::Address DecompressTaggedAnyField(
+  V8_INLINE static internal::Address DecompressTaggedField(
       internal::Address heap_object_ptr, uint32_t value) {
     internal::Address base =
         GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
@@ -717,7 +717,7 @@ Local<Value> Object::GetInternalField(int index) {
 #ifdef V8_COMPRESS_POINTERS
   // We read the full pointer value and then decompress it in order to avoid
   // dealing with potential endiannes issues.
-  value = I::DecompressTaggedAnyField(obj, static_cast<uint32_t>(value));
+  value = I::DecompressTaggedField(obj, static_cast<uint32_t>(value));
 #endif
   internal::Isolate* isolate =
       internal::IsolateFromNeverReadOnlySpaceObject(obj);
@@ -309,7 +309,7 @@ void BaselineAssembler::Pop(T... registers) {
   detail::PopAllHelper<T...>::Pop(this, registers...);
 }

-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                         int offset) {
   __ ldr(output, FieldMemOperand(source, offset));
 }
@@ -326,11 +326,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   SmiUntag(output);
 }

-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  __ ldr(output, FieldMemOperand(source, offset));
-}
-
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   __ ldrh(output, FieldMemOperand(source, offset));
@@ -372,7 +367,7 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                 Label* on_result,
                                                 Label::Distance) {
   Label fallthrough;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
+  LoadTaggedField(scratch_and_result, feedback_vector,
                   FeedbackVector::OffsetOfElementAt(slot.ToInt()));
   __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);

@@ -398,7 +393,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);

   Register interrupt_budget = scratch_scope.AcquireScratch();
@@ -421,7 +416,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);

   Register interrupt_budget = scratch_scope.AcquireScratch();
@@ -437,16 +432,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                   Context::OffsetOfElementAt(index));
 }

 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
@@ -455,33 +450,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
 void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }

 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);

   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@@ -369,9 +369,9 @@ void BaselineAssembler::Pop(T... registers) {
   detail::PopAllHelper<T...>::Pop(this, registers...);
 }

-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                         int offset) {
-  __ LoadTaggedPointerField(output, FieldMemOperand(source, offset));
+  __ LoadTaggedField(output, FieldMemOperand(source, offset));
 }

 void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
@@ -386,11 +386,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   SmiUntag(output);
 }

-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  __ LoadAnyTaggedField(output, FieldMemOperand(source, offset));
-}
-
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   __ Ldrh(output, FieldMemOperand(source, offset));
@@ -440,7 +435,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);

   Register interrupt_budget = scratch_scope.AcquireScratch().W();
@@ -463,7 +458,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);

   Register interrupt_budget = scratch_scope.AcquireScratch().W();
@@ -479,16 +474,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                   Context::OffsetOfElementAt(index));
 }

 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
@@ -497,33 +492,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
 void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }

 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);

   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@@ -114,13 +114,12 @@ void BaselineAssembler::SmiUntag(Register output, Register value) {

 void BaselineAssembler::LoadFixedArrayElement(Register output, Register array,
                                               int32_t index) {
-  LoadTaggedAnyField(output, array,
-                     FixedArray::kHeaderSize + index * kTaggedSize);
+  LoadTaggedField(output, array, FixedArray::kHeaderSize + index * kTaggedSize);
 }

 void BaselineAssembler::LoadPrototype(Register prototype, Register object) {
   __ LoadMap(prototype, object);
-  LoadTaggedPointerField(prototype, prototype, Map::kPrototypeOffset);
+  LoadTaggedField(prototype, prototype, Map::kPrototypeOffset);
 }
 void BaselineAssembler::LoadContext(Register output) {
   LoadRegister(output, interpreter::Register::current_context());
@@ -147,13 +147,11 @@ class BaselineAssembler {
   inline void TailCallBuiltin(Builtin builtin);
   inline void CallRuntime(Runtime::FunctionId function, int nargs);

-  inline void LoadTaggedPointerField(Register output, Register source,
-                                     int offset);
+  inline void LoadTaggedField(Register output, Register source, int offset);
   inline void LoadTaggedSignedField(Register output, Register source,
                                     int offset);
   inline void LoadTaggedSignedFieldAndUntag(Register output, Register source,
                                             int offset);
-  inline void LoadTaggedAnyField(Register output, Register source, int offset);
   inline void LoadWord16FieldZeroExtend(Register output, Register source,
                                         int offset);
   inline void LoadWord8Field(Register output, Register source, int offset);
@@ -170,15 +168,11 @@ class BaselineAssembler {
   // X64 supports complex addressing mode, pointer decompression can be done by
   // [%compressed_base + %r1 + K].
 #if V8_TARGET_ARCH_X64
-  inline void LoadTaggedPointerField(TaggedRegister output, Register source,
+  inline void LoadTaggedField(TaggedRegister output, Register source,
                               int offset);
-  inline void LoadTaggedPointerField(TaggedRegister output,
-                                     TaggedRegister source, int offset);
-  inline void LoadTaggedPointerField(Register output, TaggedRegister source,
+  inline void LoadTaggedField(TaggedRegister output, TaggedRegister source,
                               int offset);
-  inline void LoadTaggedAnyField(Register output, TaggedRegister source,
-                                 int offset);
-  inline void LoadTaggedAnyField(TaggedRegister output, TaggedRegister source,
+  inline void LoadTaggedField(Register output, TaggedRegister source,
                               int offset);
   inline void LoadFixedArrayElement(Register output, TaggedRegister array,
                                     int32_t index);
@@ -439,7 +439,7 @@ void BaselineCompiler::LoadFeedbackVector(Register output) {

 void BaselineCompiler::LoadClosureFeedbackArray(Register output) {
   LoadFeedbackVector(output);
-  __ LoadTaggedPointerField(output, output,
+  __ LoadTaggedField(output, output,
                      FeedbackVector::kClosureFeedbackCellArrayOffset);
 }

@@ -754,7 +754,7 @@ void BaselineCompiler::VisitLdaCurrentContextSlot() {
   BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
   Register context = scratch_scope.AcquireScratch();
   __ LoadContext(context);
-  __ LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+  __ LoadTaggedField(kInterpreterAccumulatorRegister, context,
                      Context::OffsetOfElementAt(Index(0)));
 }

@@ -1350,7 +1350,7 @@ void BaselineCompiler::VisitIntrinsicCreateJSGeneratorObject(
 void BaselineCompiler::VisitIntrinsicGeneratorGetResumeMode(
     interpreter::RegisterList args) {
   __ LoadRegister(kInterpreterAccumulatorRegister, args[0]);
-  __ LoadTaggedAnyField(kInterpreterAccumulatorRegister,
+  __ LoadTaggedField(kInterpreterAccumulatorRegister,
                      kInterpreterAccumulatorRegister,
                      JSGeneratorObject::kResumeModeOffset);
 }
@@ -2211,7 +2211,7 @@ void BaselineCompiler::VisitSwitchOnGeneratorState() {
                           Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));

   Register context = scratch_scope.AcquireScratch();
-  __ LoadTaggedAnyField(context, generator_object,
+  __ LoadTaggedField(context, generator_object,
                      JSGeneratorObject::kContextOffset);
   __ StoreContext(context);

@@ -293,7 +293,7 @@ void BaselineAssembler::Pop(T... registers) {
   (__ Pop(registers), ...);
 }

-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                         int offset) {
   __ mov(output, FieldOperand(source, offset));
 }
@@ -310,11 +310,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   SmiUntag(output);
 }

-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  __ mov(output, FieldOperand(source, offset));
-}
-
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   __ movzx_w(output, FieldOperand(source, offset));
@@ -354,7 +349,7 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                 Label* on_result,
                                                 Label::Distance distance) {
   Label fallthrough;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
+  LoadTaggedField(scratch_and_result, feedback_vector,
                   FeedbackVector::OffsetOfElementAt(slot.ToInt()));
   __ LoadWeakValue(scratch_and_result, &fallthrough);

@@ -378,7 +373,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);
   __ add(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
          Immediate(weight));
@@ -395,7 +390,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   Register feedback_cell = scratch_scope.AcquireScratch();
   DCHECK(!AreAliased(feedback_cell, weight));
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);
   __ add(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
          weight);
@@ -405,16 +400,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                   Context::OffsetOfElementAt(index));
 }

 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
@@ -423,33 +418,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
 void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }

 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);

   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@@ -296,7 +296,7 @@ void BaselineAssembler::Pop(T... registers) {
   detail::PopAllHelper<T...>::Pop(this, registers...);
 }

-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                         int offset) {
   __ Ld_d(output, FieldMemOperand(source, offset));
 }
@@ -310,10 +310,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   LoadTaggedSignedField(output, source, offset);
   SmiUntag(output);
 }
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  __ Ld_d(output, FieldMemOperand(source, offset));
-}
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   __ Ld_hu(output, FieldMemOperand(source, offset));
@@ -350,7 +346,7 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                 Label* on_result,
                                                 Label::Distance) {
   Label fallthrough;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
+  LoadTaggedField(scratch_and_result, feedback_vector,
                   FeedbackVector::OffsetOfElementAt(slot.ToInt()));
   __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
   // Is it marked_for_deoptimization? If yes, clear the slot.
@@ -374,7 +370,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);

   Register interrupt_budget = scratch_scope.AcquireScratch();
@@ -394,7 +390,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);

   Register interrupt_budget = scratch_scope.AcquireScratch();
@@ -410,16 +406,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                   Context::OffsetOfElementAt(index));
 }

 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
@@ -428,33 +424,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
 void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }

 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);

   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@@ -304,7 +304,7 @@ void BaselineAssembler::Pop(T... registers) {
   detail::PopAllHelper<T...>::Pop(this, registers...);
 }

-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                         int offset) {
   __ Ld(output, FieldMemOperand(source, offset));
 }
@@ -318,10 +318,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   LoadTaggedSignedField(output, source, offset);
   SmiUntag(output);
 }
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  __ Ld(output, FieldMemOperand(source, offset));
-}
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   __ Lhu(output, FieldMemOperand(source, offset));
@@ -360,7 +356,7 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                 Label* on_result,
                                                 Label::Distance) {
   Label fallthrough;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
+  LoadTaggedField(scratch_and_result, feedback_vector,
                   FeedbackVector::OffsetOfElementAt(slot.ToInt()));
   __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
   // Is it marked_for_deoptimization? If yes, clear the slot.
@@ -384,7 +380,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);

   Register interrupt_budget = scratch_scope.AcquireScratch();
@@ -404,7 +400,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);

   Register interrupt_budget = scratch_scope.AcquireScratch();
@@ -420,16 +416,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                   Context::OffsetOfElementAt(index));
 }

 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
@@ -438,33 +434,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
 void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }

 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);

   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@@ -206,7 +206,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
                                      MemOperand operand, Label* target,
                                      Label::Distance) {
   ASM_CODE_COMMENT(masm_);
-  __ LoadTaggedPointerField(ip, operand, r0);
+  __ LoadTaggedField(ip, operand, r0);
   JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, value, ip, target);
 }

@@ -214,7 +214,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
                                      Register value, Label* target,
                                      Label::Distance) {
   ASM_CODE_COMMENT(masm_);
-  __ LoadTaggedPointerField(ip, operand, r0);
+  __ LoadTaggedField(ip, operand, r0);
   JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, value, ip, target);
 }

@@ -374,10 +374,10 @@ void BaselineAssembler::Pop(T... registers) {
   detail::PopAllHelper<T...>::Pop(this, registers...);
 }

-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                         int offset) {
   ASM_CODE_COMMENT(masm_);
-  __ LoadTaggedPointerField(output, FieldMemOperand(source, offset), r0);
+  __ LoadTaggedField(output, FieldMemOperand(source, offset), r0);
 }

 void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
@@ -393,12 +393,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   SmiUntag(output);
 }

-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  ASM_CODE_COMMENT(masm_);
-  __ LoadAnyTaggedField(output, FieldMemOperand(source, offset), r0);
-}
-
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   ASM_CODE_COMMENT(masm_);
@@ -443,7 +437,7 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                 Label* on_result,
                                                 Label::Distance) {
   Label fallthrough;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
+  LoadTaggedField(scratch_and_result, feedback_vector,
                   FeedbackVector::OffsetOfElementAt(slot.ToInt()));
   __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);

@@ -469,7 +463,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);

   Register interrupt_budget = scratch_scope.AcquireScratch();
@@ -494,7 +488,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);

   Register interrupt_budget = scratch_scope.AcquireScratch();
@@ -513,9 +507,9 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   ASM_CODE_COMMENT(masm_);
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                   Context::OffsetOfElementAt(index));
 }

@@ -523,7 +517,7 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   ASM_CODE_COMMENT(masm_);
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
@@ -533,34 +527,30 @@ void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   ASM_CODE_COMMENT(masm_);
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }

 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   ASM_CODE_COMMENT(masm_);
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);

   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@@ -297,9 +297,9 @@ void BaselineAssembler::Pop(T... registers) {
   detail::PopAllHelper<T...>::Pop(this, registers...);
 }

-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                         int offset) {
-  __ LoadTaggedPointerField(output, FieldMemOperand(source, offset));
+  __ LoadTaggedField(output, FieldMemOperand(source, offset));
 }
 void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
                                               int offset) {
@@ -311,10 +311,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   LoadTaggedSignedField(output, source, offset);
   SmiUntag(output);
 }
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  __ LoadAnyTaggedField(output, FieldMemOperand(source, offset));
-}
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   __ Lhu(output, FieldMemOperand(source, offset));
@@ -351,7 +347,7 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                 Label* on_result,
                                                 Label::Distance) {
   Label fallthrough, clear_slot;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
+  LoadTaggedField(scratch_and_result, feedback_vector,
                   FeedbackVector::OffsetOfElementAt(slot.ToInt()));
   __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);

@@ -379,7 +375,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);

   Register interrupt_budget = scratch_scope.AcquireScratch();
@@ -401,7 +397,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);

   Register interrupt_budget = scratch_scope.AcquireScratch();
@@ -419,16 +415,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                   Context::OffsetOfElementAt(index));
 }

 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
@@ -437,33 +433,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
 void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }

 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);

   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@@ -211,9 +211,9 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
   if (COMPRESS_POINTERS_BOOL) {
     MemOperand addr =
         MemOperand(operand.rx(), operand.rb(), operand.offset() + stack_bias);
-    __ LoadTaggedPointerField(ip, addr, r0);
+    __ LoadTaggedField(ip, addr, r0);
   } else {
-    __ LoadTaggedPointerField(ip, operand, r0);
+    __ LoadTaggedField(ip, operand, r0);
   }
   JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, value, ip, target);
 }
@@ -226,9 +226,9 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
   if (COMPRESS_POINTERS_BOOL) {
     MemOperand addr =
         MemOperand(operand.rx(), operand.rb(), operand.offset() + stack_bias);
-    __ LoadTaggedPointerField(ip, addr, r0);
+    __ LoadTaggedField(ip, addr, r0);
   } else {
-    __ LoadTaggedPointerField(ip, operand, r0);
+    __ LoadTaggedField(ip, operand, r0);
   }
   JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, ip, value, target);
 }
@@ -387,10 +387,10 @@ void BaselineAssembler::Pop(T... registers) {
   detail::PopAllHelper<T...>::Pop(this, registers...);
 }

-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                         int offset) {
   ASM_CODE_COMMENT(masm_);
-  __ LoadTaggedPointerField(output, FieldMemOperand(source, offset), r0);
+  __ LoadTaggedField(output, FieldMemOperand(source, offset), r0);
 }

 void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
@@ -406,10 +406,10 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   SmiUntag(output);
 }

-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                         int offset) {
   ASM_CODE_COMMENT(masm_);
-  __ LoadAnyTaggedField(output, FieldMemOperand(source, offset), r0);
+  __ LoadTaggedField(output, FieldMemOperand(source, offset), r0);
 }

 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
@@ -456,7 +456,7 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                 Label* on_result,
                                                 Label::Distance) {
   Label fallthrough;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
+  LoadTaggedField(scratch_and_result, feedback_vector,
                   FeedbackVector::OffsetOfElementAt(slot.ToInt()));
   __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);

@@ -482,7 +482,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);

   Register interrupt_budget = scratch_scope.AcquireScratch();
@@ -507,7 +507,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);

   Register interrupt_budget = scratch_scope.AcquireScratch();
@@ -525,16 +525,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                   Context::OffsetOfElementAt(index));
 }

 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
@@ -543,33 +543,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
 void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }

 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);

   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@ -287,9 +287,9 @@ void BaselineAssembler::Pop(T... registers) {
|
||||
(__ Pop(registers), ...);
|
||||
}
|
||||
|
||||
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
|
||||
void BaselineAssembler::LoadTaggedField(Register output, Register source,
|
||||
int offset) {
|
||||
__ LoadTaggedPointerField(output, FieldOperand(source, offset));
|
||||
__ LoadTaggedField(output, FieldOperand(source, offset));
|
||||
}
|
||||
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
|
||||
int offset) {
|
||||
@ -300,10 +300,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
|
||||
int offset) {
|
||||
__ SmiUntagField(output, FieldOperand(source, offset));
|
||||
}
|
||||
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
|
||||
int offset) {
|
||||
__ LoadAnyTaggedField(output, FieldOperand(source, offset));
|
||||
}
|
||||
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
|
||||
Register source, int offset) {
|
||||
__ movzxwq(output, FieldOperand(source, offset));
|
||||
@ -331,45 +327,31 @@ void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
|
||||
__ StoreTaggedField(FieldOperand(target, offset), value);
|
||||
}
|
||||
|
||||
void BaselineAssembler::LoadTaggedPointerField(TaggedRegister output,
|
||||
Register source, int offset) {
|
||||
__ LoadTaggedPointerField(output, FieldOperand(source, offset));
|
||||
}
|
||||
|
||||
void BaselineAssembler::LoadTaggedPointerField(TaggedRegister output,
|
||||
TaggedRegister source,
|
||||
void BaselineAssembler::LoadTaggedField(TaggedRegister output, Register source,
|
||||
int offset) {
|
||||
__ LoadTaggedPointerField(output, FieldOperand(source, offset));
|
||||
__ LoadTaggedField(output, FieldOperand(source, offset));
|
||||
}
|
||||
|
||||
void BaselineAssembler::LoadTaggedPointerField(Register output,
|
||||
TaggedRegister source,
|
||||
void BaselineAssembler::LoadTaggedField(TaggedRegister output,
|
||||
TaggedRegister source, int offset) {
|
||||
__ LoadTaggedField(output, FieldOperand(source, offset));
|
||||
}
|
||||
|
||||
void BaselineAssembler::LoadTaggedField(Register output, TaggedRegister source,
|
||||
int offset) {
|
||||
__ LoadTaggedPointerField(output, FieldOperand(source, offset));
|
||||
}
|
||||
|
||||
void BaselineAssembler::LoadTaggedAnyField(Register output,
|
||||
TaggedRegister source, int offset) {
|
||||
__ LoadAnyTaggedField(output, FieldOperand(source, offset));
|
||||
}
|
||||
|
||||
void BaselineAssembler::LoadTaggedAnyField(TaggedRegister output,
|
||||
TaggedRegister source, int offset) {
|
||||
__ LoadAnyTaggedField(output, FieldOperand(source, offset));
|
||||
__ LoadTaggedField(output, FieldOperand(source, offset));
|
||||
}

void BaselineAssembler::LoadFixedArrayElement(Register output,
    TaggedRegister array,
    int32_t index) {
  LoadTaggedAnyField(output, array,
      FixedArray::kHeaderSize + index * kTaggedSize);
  LoadTaggedField(output, array, FixedArray::kHeaderSize + index * kTaggedSize);
}

void BaselineAssembler::LoadFixedArrayElement(TaggedRegister output,
    TaggedRegister array,
    int32_t index) {
  LoadTaggedAnyField(output, array,
      FixedArray::kHeaderSize + index * kTaggedSize);
  LoadTaggedField(output, array, FixedArray::kHeaderSize + index * kTaggedSize);
}

void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,

@ -389,8 +371,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  LoadFunction(feedback_cell);
  // Decompresses pointer by complex addressing mode when necessary.
  TaggedRegister tagged(feedback_cell);
  LoadTaggedPointerField(tagged, feedback_cell,
      JSFunction::kFeedbackCellOffset);
  LoadTaggedField(tagged, feedback_cell, JSFunction::kFeedbackCellOffset);
  __ addl(FieldOperand(tagged, FeedbackCell::kInterruptBudgetOffset),
      Immediate(weight));
  if (skip_interrupt_label) {

@ -407,8 +388,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  LoadFunction(feedback_cell);
  // Decompresses pointer by complex addressing mode when necessary.
  TaggedRegister tagged(feedback_cell);
  LoadTaggedPointerField(tagged, feedback_cell,
      JSFunction::kFeedbackCellOffset);
  LoadTaggedField(tagged, feedback_cell, JSFunction::kFeedbackCellOffset);
  __ addl(FieldOperand(tagged, FeedbackCell::kInterruptBudgetOffset), weight);
  if (skip_interrupt_label) __ j(greater_equal, skip_interrupt_label);
}
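The TaggedRegister overloads and the "complex addressing mode" comment above refer to keeping intermediate values compressed: rather than materializing a full 64-bit pointer, the cage-base add is folded into the next memory operand. A rough standalone illustration (hypothetical helper, not the V8 API; the real code also accounts for the heap-object tag):

#include <cstdint>

using Address = uintptr_t;

// The intermediate stays a 32-bit compressed value; decompression rides
// along in the effective-address computation of the following load, which
// on x64 is a single instruction with a [base + index + disp] operand.
uint32_t LoadThroughCompressed(Address cage_base, uint32_t compressed,
                               int offset) {
  const auto* field =
      reinterpret_cast<const uint32_t*>(cage_base + compressed + offset);
  return *field;  // the loaded field is itself still compressed
}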

@ -420,16 +400,16 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
  // addressing mode, any intermediate context pointer is loaded in compressed
  // form.
  if (depth == 0) {
    LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
    LoadTaggedField(kInterpreterAccumulatorRegister, context,
        Context::OffsetOfElementAt(index));
  } else {
    TaggedRegister tagged(context);
    LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
    LoadTaggedField(tagged, context, Context::kPreviousOffset);
    --depth;
    for (; depth > 0; --depth) {
      LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
      LoadTaggedField(tagged, tagged, Context::kPreviousOffset);
    }
    LoadTaggedAnyField(kInterpreterAccumulatorRegister, tagged,
    LoadTaggedField(kInterpreterAccumulatorRegister, tagged,
        Context::OffsetOfElementAt(index));
  }
}
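LdaContextSlot above is a fixed-depth walk of the context chain followed by one slot read; the first hop is peeled out of the loop so every iteration reuses the same compressed register. A plain-C++ mirror of the emitted control flow (hypothetical types; real contexts are GC-managed tagged objects):

struct Context {
  Context* previous;  // Context::kPreviousOffset analogue
  void** slots;       // Context::OffsetOfElementAt(index) analogue
};

void* LoadContextSlot(Context* context, int index, int depth) {
  for (; depth > 0; --depth) context = context->previous;  // hop outward
  return context->slots[index];  // then read the requested slot
}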

@ -442,10 +422,10 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
  // form.
  if (depth > 0) {
    TaggedRegister tagged(context);
    LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
    LoadTaggedField(tagged, context, Context::kPreviousOffset);
    --depth;
    for (; depth > 0; --depth) {
      LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
      LoadTaggedField(tagged, tagged, Context::kPreviousOffset);
    }
    if (COMPRESS_POINTERS_BOOL) {
      // Decompress tagged pointer.

@ -463,29 +443,26 @@ void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
  // enabled, any intermediate context pointer is loaded in compressed form.
  TaggedRegister tagged(context);
  if (depth == 0) {
    LoadTaggedPointerField(tagged, context, Context::kExtensionOffset);
    LoadTaggedField(tagged, context, Context::kExtensionOffset);
  } else {
    LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
    LoadTaggedField(tagged, context, Context::kPreviousOffset);
    --depth;
    for (; depth > 0; --depth) {
      LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
      LoadTaggedField(tagged, tagged, Context::kPreviousOffset);
    }
    LoadTaggedPointerField(tagged, tagged, Context::kExtensionOffset);
    LoadTaggedField(tagged, tagged, Context::kExtensionOffset);
  }
  if (cell_index > 0) {
    LoadTaggedPointerField(tagged, tagged,
        SourceTextModule::kRegularExportsOffset);
    LoadTaggedField(tagged, tagged, SourceTextModule::kRegularExportsOffset);
    // The actual array index is (cell_index - 1).
    cell_index -= 1;
  } else {
    LoadTaggedPointerField(tagged, tagged,
        SourceTextModule::kRegularImportsOffset);
    LoadTaggedField(tagged, tagged, SourceTextModule::kRegularImportsOffset);
    // The actual array index is (-cell_index - 1).
    cell_index = -cell_index - 1;
  }
  LoadFixedArrayElement(tagged, tagged, cell_index);
  LoadTaggedAnyField(kInterpreterAccumulatorRegister, tagged,
      Cell::kValueOffset);
  LoadTaggedField(kInterpreterAccumulatorRegister, tagged, Cell::kValueOffset);
}
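The sign of cell_index selects the array in LdaModuleVariable: positive values name regular exports, negative values regular imports, each offset by one to reach the real array slot. A tiny sketch of that mapping (hypothetical helper name):

// Mirrors the cell_index convention used above.
int ModuleCellArrayIndex(int cell_index, bool* is_export) {
  *is_export = cell_index > 0;
  return *is_export ? cell_index - 1    // regular exports array
                    : -cell_index - 1;  // regular imports array
}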

void BaselineAssembler::StaModuleVariable(Register context, Register value,

@ -495,17 +472,16 @@ void BaselineAssembler::StaModuleVariable(Register context, Register value,
  // enabled, any intermediate context pointer is loaded in compressed form.
  TaggedRegister tagged(context);
  if (depth == 0) {
    LoadTaggedPointerField(tagged, context, Context::kExtensionOffset);
    LoadTaggedField(tagged, context, Context::kExtensionOffset);
  } else {
    LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
    LoadTaggedField(tagged, context, Context::kPreviousOffset);
    --depth;
    for (; depth > 0; --depth) {
      LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
      LoadTaggedField(tagged, tagged, Context::kPreviousOffset);
    }
    LoadTaggedPointerField(tagged, tagged, Context::kExtensionOffset);
    LoadTaggedField(tagged, tagged, Context::kExtensionOffset);
  }
  LoadTaggedPointerField(tagged, tagged,
      SourceTextModule::kRegularExportsOffset);
  LoadTaggedField(tagged, tagged, SourceTextModule::kRegularExportsOffset);

  // The actual array index is (cell_index - 1).
  cell_index -= 1;

@ -213,7 +213,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
  //  -- sp[4*kSystemPointerSize]: context (pushed by FrameScope)
  // -----------------------------------

  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
  __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
  __ DecodeField<SharedFunctionInfo::FunctionKindBits>(w4);

@ -423,7 +423,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
  }
  __ Cmp(scratch1, INTERPRETER_DATA_TYPE);
  __ B(ne, &done);
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      sfi_data,
      FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
  __ Bind(&done);

@ -446,10 +446,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  __ AssertGeneratorObject(x1);

  // Load suspended function and context.
  __ LoadTaggedPointerField(
      x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
  __ LoadTaggedPointerField(cp,
      FieldMemOperand(x4, JSFunction::kContextOffset));
  __ LoadTaggedField(x4,
      FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
  __ LoadTaggedField(cp, FieldMemOperand(x4, JSFunction::kContextOffset));

  // Flood function if we are stepping.
  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;

@ -477,7 +476,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  __ B(lo, &stack_overflow);

  // Get number of arguments for generator function.
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
  __ Ldrh(w10, FieldMemOperand(
      x10, SharedFunctionInfo::kFormalParameterCountOffset));

@ -493,8 +492,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  __ Poke(padreg, Operand(x11, LSL, kSystemPointerSizeLog2));

  // Poke receiver into highest claimed slot.
  __ LoadTaggedPointerField(
      x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
  __ LoadTaggedField(x5,
      FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
  __ Poke(x5, __ ReceiverOperand(x10));

  // ----------- S t a t e -------------

@ -507,7 +506,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  // -----------------------------------

  // Copy the function arguments from the generator object's register file.
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      x5,
      FieldMemOperand(x1, JSGeneratorObject::kParametersAndRegistersOffset));
  {

@ -518,7 +517,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
    __ Add(x5, x5, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
    __ Bind(&loop);
    __ Sub(x10, x10, 1);
    __ LoadAnyTaggedField(x11, MemOperand(x5, -kTaggedSize, PreIndex));
    __ LoadTaggedField(x11, MemOperand(x5, -kTaggedSize, PreIndex));
    __ Str(x11, MemOperand(x12, -kSystemPointerSize, PostIndex));
    __ Cbnz(x10, &loop);
    __ Bind(&done);

@ -527,9 +526,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  // Underlying function needs to have bytecode available.
  if (v8_flags.debug_code) {
    Label is_baseline;
    __ LoadTaggedPointerField(
    __ LoadTaggedField(
        x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
    __ LoadTaggedPointerField(
    __ LoadTaggedField(
        x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
    GetSharedFunctionInfoBytecodeOrBaseline(masm, x3, x0, &is_baseline);
    __ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);

@ -539,7 +538,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {

  // Resume (Ignition/TurboFan) generator object.
  {
    __ LoadTaggedPointerField(
    __ LoadTaggedField(
        x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
    __ Ldrh(w0, FieldMemOperand(
        x0, SharedFunctionInfo::kFormalParameterCountOffset));

@ -549,7 +548,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
    __ Mov(x3, x1);
    __ Mov(x1, x4);
    static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
    __ LoadTaggedPointerField(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
    __ LoadTaggedField(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
    __ JumpCodeObject(x2);
  }

@ -561,8 +560,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
    __ Push(x1, padreg, x4, x5);
    __ CallRuntime(Runtime::kDebugOnFunctionCall);
    __ Pop(padreg, x1);
    __ LoadTaggedPointerField(
        x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
    __ LoadTaggedField(x4,
        FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
  }
  __ B(&stepping_prepared);

@ -572,8 +571,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
    __ Push(x1, padreg);
    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
    __ Pop(padreg, x1);
    __ LoadTaggedPointerField(
        x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
    __ LoadTaggedField(x4,
        FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
  }
  __ B(&stepping_prepared);

@ -1108,11 +1107,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
      BaselineOutOfLinePrologueDescriptor::kClosure);
  // Load the feedback vector from the closure.
  Register feedback_vector = temps.AcquireX();
  __ LoadTaggedPointerField(
      feedback_vector,
  __ LoadTaggedField(feedback_vector,
      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ LoadTaggedPointerField(
      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
  __ LoadTaggedField(feedback_vector,
      FieldMemOperand(feedback_vector, Cell::kValueOffset));
  __ AssertFeedbackVector(feedback_vector, x4);
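This two-load sequence recurs throughout the remaining hunks: the closure points at a FeedbackCell, and the cell's value slot holds the FeedbackVector (or a placeholder before one is allocated, hence the validity checks that follow). A plain-C++ mirror of the chain, with simplified illustrative types:

struct FeedbackCell { void* value; };  // Cell::kValueOffset analogue
struct JSFunction { FeedbackCell* feedback_cell; };

void* LoadFeedbackVector(const JSFunction* closure) {
  // Two dependent tagged loads: closure -> FeedbackCell -> value slot.
  return closure->feedback_cell->value;  // may not be a FeedbackVector yet
}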

  // Check the tiering state.

@ -1270,9 +1268,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(

  // Get the bytecode array from the function object and load it into
  // kInterpreterBytecodeArrayRegister.
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      x4, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      kInterpreterBytecodeArrayRegister,
      FieldMemOperand(x4, SharedFunctionInfo::kFunctionDataOffset));

@ -1288,17 +1286,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
  __ B(ne, &compile_lazy);

  // Load the feedback vector from the closure.
  __ LoadTaggedPointerField(
      feedback_vector,
  __ LoadTaggedField(feedback_vector,
      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ LoadTaggedPointerField(
      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
  __ LoadTaggedField(feedback_vector,
      FieldMemOperand(feedback_vector, Cell::kValueOffset));

  Label push_stack_frame;
  // Check if feedback vector is valid. If valid, check for optimized code
  // and update invocation count. Otherwise, setup the stack frame.
  __ LoadTaggedPointerField(
      x7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
  __ LoadTaggedField(x7,
      FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
  __ Ldrh(x7, FieldMemOperand(x7, Map::kInstanceTypeOffset));
  __ Cmp(x7, FEEDBACK_VECTOR_TYPE);
  __ B(ne, &push_stack_frame);

@ -1480,16 +1477,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
  __ bind(&is_baseline);
  {
    // Load the feedback vector from the closure.
    __ LoadTaggedPointerField(
    __ LoadTaggedField(
        feedback_vector,
        FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
    __ LoadTaggedPointerField(
        feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
    __ LoadTaggedField(feedback_vector,
        FieldMemOperand(feedback_vector, Cell::kValueOffset));

    Label install_baseline_code;
    // Check if feedback vector is valid. If not, call prepare for baseline to
    // allocate it.
    __ LoadTaggedPointerField(
    __ LoadTaggedField(
        x7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
    __ Ldrh(x7, FieldMemOperand(x7, Map::kInstanceTypeOffset));
    __ Cmp(x7, FEEDBACK_VECTOR_TYPE);

@ -1732,16 +1729,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
  // get the custom trampoline, otherwise grab the entry address of the global
  // trampoline.
  __ Ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      x1, FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
  __ CompareObjectType(x1, kInterpreterDispatchTableRegister,
      kInterpreterDispatchTableRegister,
      INTERPRETER_DATA_TYPE);
  __ B(ne, &builtin_trampoline);

  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      x1, FieldMemOperand(x1, InterpreterData::kInterpreterTrampolineOffset));
  __ LoadCodeEntry(x1, x1);
  __ B(&trampoline_loaded);

@ -1997,7 +1994,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,

  // Load deoptimization data from the code object.
  // <deopt_data> = <code>[#deoptimization_data_offset]
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      x1,
      FieldMemOperand(
          x0, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));

@ -2348,7 +2345,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
  // Allow x2 to be a FixedArray, or a FixedDoubleArray if x4 == 0.
  Label ok, fail;
  __ AssertNotSmi(x2, AbortReason::kOperandIsNotAFixedArray);
  __ LoadTaggedPointerField(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
  __ LoadTaggedField(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
  __ Ldrh(x13, FieldMemOperand(x10, Map::kInstanceTypeOffset));
  __ Cmp(x13, FIXED_ARRAY_TYPE);
  __ B(eq, &ok);

@ -2394,7 +2391,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
  __ Add(argc, argc, len);  // Update new argc.
  __ Bind(&loop);
  __ Sub(len, len, 1);
  __ LoadAnyTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex));
  __ LoadTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex));
  __ CmpTagged(scratch, the_hole_value);
  __ Csel(scratch, scratch, undefined_value, ne);
  __ Str(scratch, MemOperand(dst, kSystemPointerSize, PostIndex));
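The loop above copies spread arguments onto the stack, replacing any hole read from the backing store with undefined. A plain mirror of that substitution (illustrative types; values are treated as opaque tagged words here):

void CopyVarargs(const void** dst, const void* const* src, int len,
                 const void* the_hole, const void* undefined_value) {
  for (int i = 0; i < len; ++i) {
    const void* value = src[i];
    // A hole in the source array becomes undefined in the call arguments.
    dst[i] = (value == the_hole) ? undefined_value : value;
  }
}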

@ -2426,7 +2423,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
  if (mode == CallOrConstructMode::kConstruct) {
    Label new_target_constructor, new_target_not_constructor;
    __ JumpIfSmi(x3, &new_target_not_constructor);
    __ LoadTaggedPointerField(x5, FieldMemOperand(x3, HeapObject::kMapOffset));
    __ LoadTaggedField(x5, FieldMemOperand(x3, HeapObject::kMapOffset));
    __ Ldrb(x5, FieldMemOperand(x5, Map::kBitFieldOffset));
    __ TestAndBranchIfAnySet(x5, Map::Bits1::IsConstructorBit::kMask,
        &new_target_constructor);

@ -2486,14 +2483,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
  // -----------------------------------
  __ AssertCallableFunction(x1);

  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));

  // Enter the context of the function; ToObject has to run in the function
  // context, and we also need to take the global proxy from the function
  // context in case of conversion.
  __ LoadTaggedPointerField(cp,
      FieldMemOperand(x1, JSFunction::kContextOffset));
  __ LoadTaggedField(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
  // We need to convert the receiver for non-native sloppy mode functions.
  Label done_convert;
  __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kFlagsOffset));

@ -2545,7 +2541,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
      __ Pop(cp, x1, x0, padreg);
      __ SmiUntag(x0);
    }
    __ LoadTaggedPointerField(
    __ LoadTaggedField(
        x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
    __ Bind(&convert_receiver);
  }

@ -2579,7 +2575,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {

  // Load [[BoundArguments]] into x2 and length of that into x4.
  Label no_bound_arguments;
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      bound_argv, FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
  __ SmiUntagField(bound_argc,
      FieldMemOperand(bound_argv, FixedArray::kLengthOffset));

@ -2681,7 +2677,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
    __ SlotAddress(copy_to, 1);
    __ Bind(&loop);
    __ Sub(counter, counter, 1);
    __ LoadAnyTaggedField(scratch,
    __ LoadTaggedField(scratch,
        MemOperand(bound_argv, kTaggedSize, PostIndex));
    __ Str(scratch, MemOperand(copy_to, kSystemPointerSize, PostIndex));
    __ Cbnz(counter, &loop);

@ -2703,7 +2699,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
  __ AssertBoundFunction(x1);

  // Patch the receiver to [[BoundThis]].
  __ LoadAnyTaggedField(x10,
  __ LoadTaggedField(x10,
      FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
  __ Poke(x10, __ ReceiverOperand(x0));

@ -2711,7 +2707,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
  Generate_PushBoundArguments(masm);

  // Call the [[BoundTargetFunction]] via the Call builtin.
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
  __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
      RelocInfo::CODE_TARGET);

@ -2812,7 +2808,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
  Label call_generic_stub;

  // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
  __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
  __ TestAndBranchIfAllClear(

@ -2844,13 +2840,13 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
    Label done;
    __ CmpTagged(x1, x3);
    __ B(ne, &done);
    __ LoadTaggedPointerField(
    __ LoadTaggedField(
        x3, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
    __ Bind(&done);
  }

  // Construct the [[BoundTargetFunction]] via the Construct builtin.
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
  __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}

@ -2874,8 +2870,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
  __ JumpIfSmi(target, &non_constructor);

  // Check if target has a [[Construct]] internal method.
  __ LoadTaggedPointerField(map,
      FieldMemOperand(target, HeapObject::kMapOffset));
  __ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset));
  {
    Register flags = x2;
    DCHECK(!AreAliased(argc, target, map, instance_type, flags));

@ -2976,12 +2971,11 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
  Register scratch = x10;
  Label allocate_vector, done;

  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      vector, FieldMemOperand(kWasmInstanceRegister,
          WasmInstanceObject::kFeedbackVectorsOffset));
  __ Add(vector, vector, Operand(func_index, LSL, kTaggedSizeLog2));
  __ LoadTaggedPointerField(vector,
      FieldMemOperand(vector, FixedArray::kHeaderSize));
  __ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize));
  __ JumpIfSmi(vector, &allocate_vector);
  __ bind(&done);
  __ Push(vector, xzr);

@ -3121,7 +3115,7 @@ void PrepareForBuiltinCall(MacroAssembler* masm, MemOperand GCScanSlotPlace,
      MemOperand(sp, -2 * kSystemPointerSize, PreIndex));
  // We had to prepare the parameters for the Call: we have to put the context
  // into kContextRegister.
  __ LoadAnyTaggedField(
  __ LoadTaggedField(
      kContextRegister,  // cp(x27)
      MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
          WasmInstanceObject::kNativeContextOffset)));

@ -3210,7 +3204,7 @@ void AllocateSuspender(MacroAssembler* masm, Register function_data,
      MemOperand(fp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset));
  __ Stp(wasm_instance, function_data,
      MemOperand(sp, -2 * kSystemPointerSize, PreIndex));
  __ LoadAnyTaggedField(
  __ LoadTaggedField(
      kContextRegister,
      MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
          WasmInstanceObject::kNativeContextOffset)));

@ -3256,8 +3250,7 @@ void ReloadParentContinuation(MacroAssembler* masm, Register wasm_instance,
        wasm::JumpBuffer::Retired);
  }
  Register parent = tmp2;
  __ LoadAnyTaggedField(
      parent,
  __ LoadTaggedField(parent,
      FieldMemOperand(active_continuation,
          WasmContinuationObject::kParentOffset));

@ -3293,7 +3286,7 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1,
      FieldMemOperand(suspender, WasmSuspenderObject::kStateOffset);
  __ Move(tmp2, Smi::FromInt(WasmSuspenderObject::kInactive));
  __ StoreTaggedField(tmp2, state_loc);
  __ LoadAnyTaggedField(
  __ LoadTaggedField(
      suspender,
      FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
  __ CompareRoot(suspender, RootIndex::kUndefinedValue);

@ -3322,17 +3315,16 @@ void LoadFunctionDataAndWasmInstance(MacroAssembler* masm,
    Register function_data,
    Register wasm_instance) {
  Register closure = function_data;
  __ LoadAnyTaggedField(
  __ LoadTaggedField(
      function_data,
      MemOperand(
          closure,
          wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()));
  __ LoadAnyTaggedField(
  __ LoadTaggedField(
      function_data,
      FieldMemOperand(function_data,
          SharedFunctionInfo::kFunctionDataOffset));
      FieldMemOperand(function_data, SharedFunctionInfo::kFunctionDataOffset));

  __ LoadAnyTaggedField(
  __ LoadTaggedField(
      wasm_instance,
      FieldMemOperand(function_data,
          WasmExportedFunctionData::kInstanceOffset));

@ -3573,7 +3565,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
  // A result of AllocateSuspender is in the return register.
  __ Str(suspender, MemOperand(fp, kSuspenderOffset));
  DEFINE_SCOPED(target_continuation);
  __ LoadAnyTaggedField(
  __ LoadTaggedField(
      target_continuation,
      FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
  FREE_REG(suspender);

@ -4229,7 +4221,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
  __ Mov(scratch, 1);
  __ Str(scratch, MemOperand(thread_in_wasm_flag_addr, 0));

  __ LoadAnyTaggedField(
  __ LoadTaggedField(
      function_entry,
      FieldMemOperand(function_data,
          WasmExportedFunctionData::kInternalOffset));

@ -4497,7 +4489,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
  regs.ResetExcept(promise, suspender, continuation);

  DEFINE_REG(suspender_continuation);
  __ LoadAnyTaggedField(
  __ LoadTaggedField(
      suspender_continuation,
      FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
  if (v8_flags.debug_code) {

@ -4518,7 +4510,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
  // Update roots.
  // -------------------------------------------
  DEFINE_REG(caller);
  __ LoadAnyTaggedField(caller,
  __ LoadTaggedField(caller,
      FieldMemOperand(suspender_continuation,
          WasmContinuationObject::kParentOffset));
  int32_t active_continuation_offset =

@ -4526,7 +4518,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
      RootIndex::kActiveContinuation);
  __ Str(caller, MemOperand(kRootRegister, active_continuation_offset));
  DEFINE_REG(parent);
  __ LoadAnyTaggedField(
  __ LoadTaggedField(
      parent, FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
  int32_t active_suspender_offset =
      MacroAssembler::RootRegisterOffsetForRootIndex(

@ -4597,7 +4589,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
  // Load suspender from closure.
  // -------------------------------------------
  DEFINE_REG(sfi);
  __ LoadAnyTaggedField(
  __ LoadTaggedField(
      sfi,
      MemOperand(
          closure,

@ -4607,12 +4599,12 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
  // RecordWriteField calls later.
  DEFINE_PINNED(suspender, WriteBarrierDescriptor::ObjectRegister());
  DEFINE_REG(function_data);
  __ LoadAnyTaggedField(
  __ LoadTaggedField(
      function_data,
      FieldMemOperand(sfi, SharedFunctionInfo::kFunctionDataOffset));
  // The write barrier uses a fixed register for the host object (rdi). The next
  // barrier is on the suspender, so load it in rdi directly.
  __ LoadAnyTaggedField(
  __ LoadTaggedField(
      suspender,
      FieldMemOperand(function_data, WasmResumeData::kSuspenderOffset));
  // Check the suspender state.

@ -4671,10 +4663,9 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
  FREE_REG(suspender);
  DEFINE_PINNED(target_continuation, WriteBarrierDescriptor::ObjectRegister());
  suspender = target_continuation;
  __ LoadAnyTaggedField(
  __ LoadTaggedField(
      target_continuation,
      FieldMemOperand(suspender,
          WasmSuspenderObject::kContinuationOffset));
      FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
  suspender = no_reg;

  __ StoreTaggedField(

@ -5321,12 +5312,12 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
  DCHECK(!AreAliased(receiver, holder, callback, data, undef, isolate_address,
      name));

  __ LoadAnyTaggedField(data,
  __ LoadTaggedField(data,
      FieldMemOperand(callback, AccessorInfo::kDataOffset));
  __ LoadRoot(undef, RootIndex::kUndefinedValue);
  __ Mov(isolate_address, ExternalReference::isolate_address(masm->isolate()));
  __ LoadTaggedPointerField(
      name, FieldMemOperand(callback, AccessorInfo::kNameOffset));
  __ LoadTaggedField(name,
      FieldMemOperand(callback, AccessorInfo::kNameOffset));

  // PropertyCallbackArguments:
  //   receiver, data, return value, return value default, isolate, holder,

@ -5697,10 +5688,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,

  // Get the InstructionStream object from the shared function info.
  Register code_obj = x22;
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      code_obj,
      FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      code_obj,
      FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));

@ -5732,11 +5723,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,

  // Load the feedback vector.
  Register feedback_vector = x2;
  __ LoadTaggedPointerField(
      feedback_vector,
  __ LoadTaggedField(feedback_vector,
      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ LoadTaggedPointerField(
      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
  __ LoadTaggedField(feedback_vector,
      FieldMemOperand(feedback_vector, Cell::kValueOffset));

  Label install_baseline_code;
  // Check if feedback vector is valid. If not, call prepare for baseline to

@ -64,7 +64,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
  }
  __ CmpS32(scratch1, Operand(INTERPRETER_DATA_TYPE), r0);
  __ bne(&done);
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      sfi_data,
      FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset), r0);

@ -120,10 +120,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,

  // Get the InstructionStream object from the shared function info.
  Register code_obj = r9;
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      code_obj, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset),
      r0);
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      code_obj,
      FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset), r0);

@ -155,12 +155,11 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,

  // Load the feedback vector.
  Register feedback_vector = r5;
  __ LoadTaggedPointerField(
      feedback_vector,
      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
  __ LoadTaggedPointerField(
      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
  __ LoadTaggedField(feedback_vector,
      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
      r0);
  __ LoadTaggedField(feedback_vector,
      FieldMemOperand(feedback_vector, Cell::kValueOffset), r0);

  Label install_baseline_code;
  // Check if feedback vector is valid. If not, call prepare for baseline to

@ -431,7 +430,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,

  // Load deoptimization data from the code object.
  // <deopt_data> = <code>[#deoptimization_data_offset]
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      r4,
      FieldMemOperand(
          r3, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset),

@ -495,7 +494,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
  //  -- sp[4*kSystemPointerSize]: context
  // -----------------------------------

  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
  __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
  __ DecodeField<SharedFunctionInfo::FunctionKindBits>(r7);

@ -660,10 +659,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  __ AssertGeneratorObject(r4);

  // Load suspended function and context.
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
  __ LoadTaggedPointerField(cp, FieldMemOperand(r7, JSFunction::kContextOffset),
      r0);
  __ LoadTaggedField(cp, FieldMemOperand(r7, JSFunction::kContextOffset), r0);

  // Flood function if we are stepping.
  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;

@ -703,12 +701,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  // -----------------------------------

  // Copy the function arguments from the generator object's register file.
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
  __ LoadU16(
      r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
  __ subi(r6, r6, Operand(kJSArgcReceiverSlots));
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      r5, FieldMemOperand(r4, JSGeneratorObject::kParametersAndRegistersOffset),
      r0);
  {

@ -719,14 +717,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
    __ blt(&done_loop);
    __ ShiftLeftU64(r10, r6, Operand(kTaggedSizeLog2));
    __ add(scratch, r5, r10);
    __ LoadAnyTaggedField(
        scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize), r0);
    __ LoadTaggedField(scratch,
        FieldMemOperand(scratch, FixedArray::kHeaderSize), r0);
    __ Push(scratch);
    __ b(&loop);
    __ bind(&done_loop);

    // Push receiver.
    __ LoadAnyTaggedField(
    __ LoadTaggedField(
        scratch, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset), r0);
    __ Push(scratch);
  }

@ -734,9 +732,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  // Underlying function needs to have bytecode available.
  if (v8_flags.debug_code) {
    Label is_baseline;
    __ LoadTaggedPointerField(
    __ LoadTaggedField(
        r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
    __ LoadTaggedPointerField(
    __ LoadTaggedField(
        r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset), r0);
    GetSharedFunctionInfoBytecodeOrBaseline(masm, r6, ip, &is_baseline);
    __ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);

@ -746,7 +744,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {

  // Resume (Ignition/TurboFan) generator object.
  {
    __ LoadTaggedPointerField(
    __ LoadTaggedField(
        r3, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
    __ LoadU16(r3, FieldMemOperand(
        r3, SharedFunctionInfo::kFormalParameterCountOffset));

@ -756,8 +754,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
    __ mr(r6, r4);
    __ mr(r4, r7);
    static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
    __ LoadTaggedPointerField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset),
        r0);
    __ LoadTaggedField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset), r0);
    __ JumpCodeObject(r5);
  }

@ -769,7 +766,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
    __ PushRoot(RootIndex::kTheHoleValue);
    __ CallRuntime(Runtime::kDebugOnFunctionCall);
    __ Pop(r4);
    __ LoadTaggedPointerField(
    __ LoadTaggedField(
        r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
  }
  __ b(&stepping_prepared);

@ -780,7 +777,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
    __ Push(r4);
    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
    __ Pop(r4);
    __ LoadTaggedPointerField(
    __ LoadTaggedField(
        r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
  }
  __ b(&stepping_prepared);

@ -1212,12 +1209,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
      BaselineOutOfLinePrologueDescriptor::kClosure);
  // Load the feedback vector from the closure.
  Register feedback_vector = ip;
  __ LoadTaggedPointerField(
      feedback_vector,
      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
  __ LoadTaggedPointerField(
      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
  __ LoadTaggedField(feedback_vector,
      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
      r0);
  __ LoadTaggedField(feedback_vector,
      FieldMemOperand(feedback_vector, Cell::kValueOffset), r0);
  __ AssertFeedbackVector(feedback_vector, r11);

  // Check for a tiering state.

@ -1378,10 +1374,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(

  // Get the bytecode array from the function object and load it into
  // kInterpreterBytecodeArrayRegister.
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      r7, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset), r0);
  // Load original bytecode array or the debug copy.
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      kInterpreterBytecodeArrayRegister,
      FieldMemOperand(r7, SharedFunctionInfo::kFunctionDataOffset), r0);

@ -1397,17 +1393,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
  __ bne(&compile_lazy);

  // Load the feedback vector from the closure.
  __ LoadTaggedPointerField(
      feedback_vector,
      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
  __ LoadTaggedPointerField(
      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
  __ LoadTaggedField(feedback_vector,
      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
      r0);
  __ LoadTaggedField(feedback_vector,
      FieldMemOperand(feedback_vector, Cell::kValueOffset), r0);

  Label push_stack_frame;
  // Check if feedback vector is valid. If valid, check for optimized code
  // and update invocation count. Otherwise, setup the stack frame.
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      r7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset), r0);
  __ LoadU16(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
  __ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE));

@ -1589,17 +1584,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(
  __ bind(&is_baseline);
  {
    // Load the feedback vector from the closure.
    __ LoadTaggedPointerField(
    __ LoadTaggedField(
        feedback_vector,
        FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
    __ LoadTaggedPointerField(
        feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
    __ LoadTaggedField(feedback_vector,
        FieldMemOperand(feedback_vector, Cell::kValueOffset),
        r0);

    Label install_baseline_code;
    // Check if feedback vector is valid. If not, call prepare for baseline to
    // allocate it.
    __ LoadTaggedPointerField(
    __ LoadTaggedField(
        ip, FieldMemOperand(feedback_vector, HeapObject::kMapOffset), r0);
    __ LoadU16(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
    __ CmpS32(ip, Operand(FEEDBACK_VECTOR_TYPE), r0);

@ -1773,16 +1768,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
  // get the custom trampoline, otherwise grab the entry address of the global
  // trampoline.
  __ LoadU64(r5, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset), r0);
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset), r0);
  __ CompareObjectType(r5, kInterpreterDispatchTableRegister,
      kInterpreterDispatchTableRegister,
      INTERPRETER_DATA_TYPE);
  __ bne(&builtin_trampoline);

  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      r5, FieldMemOperand(r5, InterpreterData::kInterpreterTrampolineOffset),
      r0);
  __ LoadCodeEntry(r5, r5);

@ -2240,8 +2235,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
  // Allow r5 to be a FixedArray, or a FixedDoubleArray if r7 == 0.
  Label ok, fail;
  __ AssertNotSmi(r5);
  __ LoadTaggedPointerField(scratch,
      FieldMemOperand(r5, HeapObject::kMapOffset), r0);
  __ LoadTaggedField(scratch, FieldMemOperand(r5, HeapObject::kMapOffset),
      r0);
  __ LoadU16(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  __ cmpi(scratch, Operand(FIXED_ARRAY_TYPE));
  __ beq(&ok);

@ -2276,7 +2271,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
      Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
  __ mtctr(r7);
  __ bind(&loop);
  __ LoadTaggedPointerField(scratch, MemOperand(r5, kTaggedSize), r0);
  __ LoadTaggedField(scratch, MemOperand(r5, kTaggedSize), r0);
  __ addi(r5, r5, Operand(kTaggedSize));
  __ CompareRoot(scratch, RootIndex::kTheHoleValue);
  __ bne(&skip);

@ -2311,8 +2306,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
  if (mode == CallOrConstructMode::kConstruct) {
    Label new_target_constructor, new_target_not_constructor;
    __ JumpIfSmi(r6, &new_target_not_constructor);
    __ LoadTaggedPointerField(scratch,
        FieldMemOperand(r6, HeapObject::kMapOffset), r0);
    __ LoadTaggedField(scratch, FieldMemOperand(r6, HeapObject::kMapOffset),
        r0);
    __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ TestBit(scratch, Map::Bits1::IsConstructorBit::kShift, r0);
    __ bne(&new_target_constructor, cr0);

@ -2395,14 +2390,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
  // -----------------------------------
  __ AssertCallableFunction(r4);

  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);

  // Enter the context of the function; ToObject has to run in the function
  // context, and we also need to take the global proxy from the function
  // context in case of conversion.
  __ LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset),
      r0);
  __ LoadTaggedField(cp, FieldMemOperand(r4, JSFunction::kContextOffset), r0);
  // We need to convert the receiver for non-native sloppy mode functions.
  Label done_convert;
  __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kFlagsOffset));

@ -2456,7 +2450,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
      __ Pop(r3, r4);
      __ SmiUntag(r3);
    }
    __ LoadTaggedPointerField(
    __ LoadTaggedField(
        r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
    __ bind(&convert_receiver);
  }

@ -2487,7 +2481,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {

  // Load [[BoundArguments]] into r5 and length of that into r7.
  Label no_bound_arguments;
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      r5, FieldMemOperand(r4, JSBoundFunction::kBoundArgumentsOffset), r0);
  __ SmiUntag(r7, FieldMemOperand(r5, FixedArray::kLengthOffset), SetRC, r0);
  __ beq(&no_bound_arguments, cr0);

@ -2536,7 +2530,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
    __ subi(r7, r7, Operand(1));
    __ ShiftLeftU64(scratch, r7, Operand(kTaggedSizeLog2));
    __ add(scratch, scratch, r5);
    __ LoadAnyTaggedField(scratch, MemOperand(scratch), r0);
    __ LoadTaggedField(scratch, MemOperand(scratch), r0);
    __ Push(scratch);
    __ bdnz(&loop);
    __ bind(&done);

@ -2559,15 +2553,15 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
  __ AssertBoundFunction(r4);

  // Patch the receiver to [[BoundThis]].
  __ LoadAnyTaggedField(
      r6, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset), r0);
  __ LoadTaggedField(r6, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset),
      r0);
  __ StoreReceiver(r6, r3, ip);

  // Push the [[BoundArguments]] onto the stack.
  Generate_PushBoundArguments(masm);

  // Call the [[BoundTargetFunction]] via the Call builtin.
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      r4, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
  __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
      RelocInfo::CODE_TARGET);

@ -2667,7 +2661,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
  Label call_generic_stub;

  // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
  __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
  __ mov(ip, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));

@ -2699,12 +2693,12 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
    Label skip;
    __ CompareTagged(r4, r6);
    __ bne(&skip);
    __ LoadTaggedPointerField(
    __ LoadTaggedField(
        r6, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
    __ bind(&skip);

  // Construct the [[BoundTargetFunction]] via the Construct builtin.
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      r4, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
  __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}

@ -2728,8 +2722,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
  __ JumpIfSmi(target, &non_constructor);

  // Check if target has a [[Construct]] internal method.
  __ LoadTaggedPointerField(
      map, FieldMemOperand(target, HeapObject::kMapOffset), r0);
  __ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset), r0);
  {
    Register flags = r5;
    DCHECK(!AreAliased(argc, target, map, instance_type, flags));

@ -2817,15 +2810,15 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
  Register scratch = ip;
  Label allocate_vector, done;

  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      vector,
      FieldMemOperand(kWasmInstanceRegister,
          WasmInstanceObject::kFeedbackVectorsOffset),
      scratch);
  __ ShiftLeftU64(scratch, func_index, Operand(kTaggedSizeLog2));
  __ AddS64(vector, vector, scratch);
  __ LoadTaggedPointerField(
      vector, FieldMemOperand(vector, FixedArray::kHeaderSize), scratch);
  __ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize),
      scratch);
  __ JumpIfSmi(vector, &allocate_vector);
  __ bind(&done);
  __ push(kWasmInstanceRegister);

@ -3530,16 +3523,16 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {

  __ push(receiver);
  // Push data from AccessorInfo.
  __ LoadAnyTaggedField(
      scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset), r0);
  __ LoadTaggedField(scratch,
      FieldMemOperand(callback, AccessorInfo::kDataOffset), r0);
  __ push(scratch);
  __ LoadRoot(scratch, RootIndex::kUndefinedValue);
  __ Push(scratch, scratch);
  __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
  __ Push(scratch, holder);
  __ Push(Smi::zero());  // should_throw_on_error -> false
  __ LoadTaggedPointerField(
      scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset), r0);
  __ LoadTaggedField(scratch,
      FieldMemOperand(callback, AccessorInfo::kNameOffset), r0);
  __ push(scratch);

  // v8::PropertyCallbackInfo::args_ array and name handle.

@ -155,7 +155,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
  {
    UseScratchRegisterScope temps(masm);
    Register func_info = temps.Acquire();
    __ LoadTaggedPointerField(
    __ LoadTaggedField(
        func_info, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
    __ Load32U(func_info,
        FieldMemOperand(func_info, SharedFunctionInfo::kFlagsOffset));

@ -353,7 +353,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,

  __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE),
      Label::Distance::kNear);
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      sfi_data,
      FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));

@ -377,10 +377,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  __ AssertGeneratorObject(a1);

  // Load suspended function and context.
  __ LoadTaggedPointerField(
      a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
  __ LoadTaggedPointerField(cp,
      FieldMemOperand(a4, JSFunction::kContextOffset));
  __ LoadTaggedField(a4,
      FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
  __ LoadTaggedField(cp, FieldMemOperand(a4, JSFunction::kContextOffset));

  // Flood function if we are stepping.
  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;

@ -417,12 +416,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  // context allocation for any variables in generators, the actual argument
  // values have already been copied into the context and these dummy values
  // will never be used.
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
  __ Lhu(a3,
      FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
  __ SubWord(a3, a3, Operand(kJSArgcReceiverSlots));
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      t1,
      FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
  {

@ -431,23 +430,23 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
    __ SubWord(a3, a3, Operand(1));
    __ Branch(&done_loop, lt, a3, Operand(zero_reg), Label::Distance::kNear);
    __ CalcScaledAddress(kScratchReg, t1, a3, kTaggedSizeLog2);
    __ LoadAnyTaggedField(
        kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
    __ LoadTaggedField(kScratchReg,
        FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
    __ Push(kScratchReg);
    __ Branch(&loop);
    __ bind(&done_loop);
    // Push receiver.
    __ LoadAnyTaggedField(
        kScratchReg, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
    __ LoadTaggedField(kScratchReg,
        FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
    __ Push(kScratchReg);
  }

  // Underlying function needs to have bytecode available.
  if (v8_flags.debug_code) {
    Label is_baseline;
    __ LoadTaggedPointerField(
    __ LoadTaggedField(
        a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
    __ LoadTaggedPointerField(
    __ LoadTaggedField(
        a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
    GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, a0, &is_baseline);
    __ GetObjectType(a3, a3, a3);

@ -458,7 +457,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {

  // Resume (Ignition/TurboFan) generator object.
  {
    __ LoadTaggedPointerField(
    __ LoadTaggedField(
        a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
    __ Lhu(a0, FieldMemOperand(
        a0, SharedFunctionInfo::kFormalParameterCountOffset));

@ -468,7 +467,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
    __ Move(a3, a1);
    __ Move(a1, a4);
    static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
    __ LoadTaggedPointerField(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
    __ LoadTaggedField(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
    __ JumpCodeObject(a2);
  }

@ -481,8 +480,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
    __ CallRuntime(Runtime::kDebugOnFunctionCall);
    __ Pop(a1);
  }
  __ LoadTaggedPointerField(
      a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
  __ LoadTaggedField(a4,
      FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
  __ Branch(&stepping_prepared);

  __ bind(&prepare_step_in_suspended_generator);

@ -492,8 +491,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
    __ Pop(a1);
  }
  __ LoadTaggedPointerField(
      a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
  __ LoadTaggedField(a4,
      FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
  __ Branch(&stepping_prepared);

  __ bind(&stack_overflow);

@ -1130,10 +1129,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(
  Register feedback_vector = a2;
  // Get the bytecode array from the function object and load it into
  // kInterpreterBytecodeArrayRegister.
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      kScratchReg,
      FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      kInterpreterBytecodeArrayRegister,
      FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
  Label is_baseline;

@ -1147,17 +1146,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
  __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));

  // Load the feedback vector from the closure.
  __ LoadTaggedPointerField(
      feedback_vector,
  __ LoadTaggedField(feedback_vector,
      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ LoadTaggedPointerField(
      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
  __ LoadTaggedField(feedback_vector,
      FieldMemOperand(feedback_vector, Cell::kValueOffset));

  Label push_stack_frame;
  // Check if feedback vector is valid. If valid, check for optimized code
  // and update invocation count. Otherwise, setup the stack frame.
  __ LoadTaggedPointerField(
      a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
  __ LoadTaggedField(a4,
      FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
  __ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
  __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE),
      Label::Distance::kNear);

@ -1331,16 +1329,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
  __ bind(&is_baseline);
  {
    // Load the feedback vector from the closure.
    __ LoadTaggedPointerField(
    __ LoadTaggedField(
        feedback_vector,
        FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
    __ LoadTaggedPointerField(
        feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
    __ LoadTaggedField(feedback_vector,
        FieldMemOperand(feedback_vector, Cell::kValueOffset));

    Label install_baseline_code;
    // Check if feedback vector is valid. If not, call prepare for baseline to
    // allocate it.
    __ LoadTaggedPointerField(
    __ LoadTaggedField(
        t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
    __ Lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
    __ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));

@ -1511,16 +1509,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
  // get the custom trampoline, otherwise grab the entry address of the global
  // trampoline.
  __ LoadWord(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
  __ GetObjectType(t0, kInterpreterDispatchTableRegister,
      kInterpreterDispatchTableRegister);
  __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
      Operand(INTERPRETER_DATA_TYPE), Label::Distance::kNear);

  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
  __ LoadCodeEntry(t0, t0);
  __ BranchShort(&trampoline_loaded);

@ -1778,7 +1776,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,

  // Load deoptimization data from the code object.
  // <deopt_data> = <code>[#deoptimization_data_offset]
  __ LoadTaggedPointerField(
  __ LoadTaggedField(
      a1,
      MemOperand(a0,
          InstructionStream::kDeoptimizationDataOrInterpreterDataOffset -

@ -2152,7 +2150,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
  __ SubWord(scratch, sp, Operand(scratch));
  __ LoadRoot(hole_value, RootIndex::kTheHoleValue);
  __ bind(&loop);
  __ LoadTaggedPointerField(a5, MemOperand(src));
  __ LoadTaggedField(a5, MemOperand(src));
  __ AddWord(src, src, kTaggedSize);
  __ Branch(&push, ne, a5, Operand(hole_value), Label::Distance::kNear);
  __ LoadRoot(a5, RootIndex::kUndefinedValue);

@ -2190,8 +2188,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
    UseScratchRegisterScope temps(masm);
    Register scratch = temps.Acquire();
    __ JumpIfSmi(a3, &new_target_not_constructor);
    __ LoadTaggedPointerField(scratch,
        FieldMemOperand(a3, HeapObject::kMapOffset));
    __ LoadTaggedField(scratch, FieldMemOperand(a3, HeapObject::kMapOffset));
    __ Lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
    __ Branch(&new_target_constructor, ne, scratch, Operand(zero_reg),

@ -2271,7 +2268,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
  __ AssertCallableFunction(a1);

  Label class_constructor;
|
||||
__ LoadTaggedPointerField(
|
||||
__ LoadTaggedField(
|
||||
a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
||||
__ Load32U(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
|
||||
__ And(kScratchReg, a3,
|
||||
@ -2281,8 +2278,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
|
||||
// Enter the context of the function; ToObject has to run in the function
|
||||
// context, and we also need to take the global proxy from the function
|
||||
// context in case of conversion.
|
||||
__ LoadTaggedPointerField(cp,
|
||||
FieldMemOperand(a1, JSFunction::kContextOffset));
|
||||
__ LoadTaggedField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
|
||||
// We need to convert the receiver for non-native sloppy mode functions.
|
||||
Label done_convert;
|
||||
__ Load32U(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
|
||||
@ -2337,7 +2333,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
|
||||
__ Pop(a0, a1);
|
||||
__ SmiUntag(a0);
|
||||
}
|
||||
__ LoadTaggedPointerField(
|
||||
__ LoadTaggedField(
|
||||
a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
||||
__ bind(&convert_receiver);
|
||||
}
|
||||
@ -2379,7 +2375,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
|
||||
Register bound_argv = a2;
|
||||
// Load [[BoundArguments]] into a2 and length of that into a4.
|
||||
Label no_bound_arguments;
|
||||
__ LoadTaggedPointerField(
|
||||
__ LoadTaggedField(
|
||||
bound_argv, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
|
||||
__ SmiUntagField(bound_argc,
|
||||
FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
|
||||
@ -2423,7 +2419,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
|
||||
__ SubWord(a4, a4, Operand(1));
|
||||
__ Branch(&done_loop, lt, a4, Operand(zero_reg), Label::Distance::kNear);
|
||||
__ CalcScaledAddress(a5, a2, a4, kTaggedSizeLog2);
|
||||
__ LoadAnyTaggedField(kScratchReg, MemOperand(a5));
|
||||
__ LoadTaggedField(kScratchReg, MemOperand(a5));
|
||||
__ Push(kScratchReg);
|
||||
__ Branch(&loop);
|
||||
__ bind(&done_loop);
|
||||
@ -2449,8 +2445,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
|
||||
{
|
||||
UseScratchRegisterScope temps(masm);
|
||||
Register scratch = temps.Acquire();
|
||||
__ LoadAnyTaggedField(
|
||||
scratch, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
|
||||
__ LoadTaggedField(scratch,
|
||||
FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
|
||||
__ StoreReceiver(scratch, a0, kScratchReg);
|
||||
}
|
||||
|
||||
@ -2458,7 +2454,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
|
||||
Generate_PushBoundArguments(masm);
|
||||
|
||||
// Call the [[BoundTargetFunction]] via the Call builtin.
|
||||
__ LoadTaggedPointerField(
|
||||
__ LoadTaggedField(
|
||||
a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
|
||||
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
|
||||
RelocInfo::CODE_TARGET);
|
||||
@ -2548,7 +2544,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
|
||||
Label call_generic_stub;
|
||||
|
||||
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
|
||||
__ LoadTaggedPointerField(
|
||||
__ LoadTaggedField(
|
||||
a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
||||
__ Load32U(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
|
||||
__ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
|
||||
@ -2587,12 +2583,12 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
|
||||
__ Branch(&skip, ne, a1, Operand(a3), Label::Distance::kNear);
|
||||
#endif
|
||||
}
|
||||
__ LoadTaggedPointerField(
|
||||
__ LoadTaggedField(
|
||||
a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
|
||||
__ bind(&skip);
|
||||
|
||||
// Construct the [[BoundTargetFunction]] via the Construct builtin.
|
||||
__ LoadTaggedPointerField(
|
||||
__ LoadTaggedField(
|
||||
a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
|
||||
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
|
||||
}
|
||||
@ -2615,7 +2611,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
|
||||
temps.Include(t0, t1);
|
||||
Register map = temps.Acquire();
|
||||
Register scratch = temps.Acquire();
|
||||
__ LoadTaggedPointerField(map, FieldMemOperand(a1, HeapObject::kMapOffset));
|
||||
__ LoadTaggedField(map, FieldMemOperand(a1, HeapObject::kMapOffset));
|
||||
__ Lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
|
||||
__ And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
|
||||
__ Branch(&non_constructor, eq, scratch, Operand(zero_reg));
|
||||
@ -3366,7 +3362,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
|
||||
__ SubWord(sp, sp, (PCA::kArgsLength + 1) * kSystemPointerSize);
|
||||
__ StoreWord(receiver,
|
||||
MemOperand(sp, (PCA::kThisIndex + 1) * kSystemPointerSize));
|
||||
__ LoadAnyTaggedField(scratch,
|
||||
__ LoadTaggedField(scratch,
|
||||
FieldMemOperand(callback, AccessorInfo::kDataOffset));
|
||||
__ StoreWord(scratch,
|
||||
MemOperand(sp, (PCA::kDataIndex + 1) * kSystemPointerSize));
|
||||
@ -3385,8 +3381,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
|
||||
DCHECK_EQ(0, Smi::zero().ptr());
|
||||
__ StoreWord(zero_reg, MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) *
|
||||
kSystemPointerSize));
|
||||
__ LoadTaggedPointerField(
|
||||
scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
|
||||
__ LoadTaggedField(scratch,
|
||||
FieldMemOperand(callback, AccessorInfo::kNameOffset));
|
||||
__ StoreWord(scratch, MemOperand(sp, 0 * kSystemPointerSize));
|
||||
|
||||
// v8::PropertyCallbackInfo::args_ array and name handle.
|
||||
@ -3677,10 +3673,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
|
||||
|
||||
// Get the InstructionStream object from the shared function info.
|
||||
Register code_obj = s1;
|
||||
__ LoadTaggedPointerField(
|
||||
__ LoadTaggedField(
|
||||
code_obj,
|
||||
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
|
||||
__ LoadTaggedPointerField(
|
||||
__ LoadTaggedField(
|
||||
code_obj,
|
||||
FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
|
||||
|
||||
@ -3719,11 +3715,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
|
||||
|
||||
// Replace BytecodeOffset with the feedback vector.
|
||||
Register feedback_vector = a2;
|
||||
__ LoadTaggedPointerField(
|
||||
feedback_vector,
|
||||
__ LoadTaggedField(feedback_vector,
|
||||
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
|
||||
__ LoadTaggedPointerField(
|
||||
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
|
||||
__ LoadTaggedField(feedback_vector,
|
||||
FieldMemOperand(feedback_vector, Cell::kValueOffset));
|
||||
Label install_baseline_code;
|
||||
// Check if feedback vector is valid. If not, call prepare for baseline to
|
||||
// allocate it.
|
||||
|
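The hunks above and below repeat one mechanical rename across every back end: LoadTaggedPointerField and LoadAnyTaggedField collapse into a single LoadTaggedField, and PushTaggedPointerField/PushTaggedAnyField into PushTaggedField. As a minimal standalone model of what the surviving helper does — hypothetical names and constants chosen only for this sketch, not V8 code — the dispatch looks roughly like this:

#include <cstdint>
#include <cstring>

// Illustrative stand-ins for V8's COMPRESS_POINTERS_BOOL and cage base
// (both values are hypothetical, picked for this sketch).
constexpr bool kCompressPointers = true;
constexpr uint64_t kCageBase = 0x200000000000;  // 4GB-aligned in this model

// One helper now serves known-pointer and pointer-or-Smi fields alike:
// load 32 bits, zero-extend, add the cage base. No Smi special case.
uint64_t LoadTaggedField(const void* field) {
  if (kCompressPointers) {
    uint32_t compressed;
    std::memcpy(&compressed, field, sizeof(compressed));
    return kCageBase + compressed;
  }
  uint64_t full;  // uncompressed build: the slot holds a full pointer
  std::memcpy(&full, field, sizeof(full));
  return full;
}

int main() {
  uint32_t slot = 0x12345679;  // a compressed tagged slot in some object
  return LoadTaggedField(&slot) == kCageBase + 0x12345679 ? 0 : 1;
}

Before this change there were two such helpers whose bodies differed only in which decompression routine they called; the macro-assembler diff near the end of this commit deletes one of them wholesale.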
@ -65,7 +65,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
}
__ CmpS32(scratch1, Operand(INTERPRETER_DATA_TYPE));
__ bne(&done);
__ LoadTaggedPointerField(
__ LoadTaggedField(
sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));

@ -120,10 +120,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,

// Get the InstructionStream object from the shared function info.
Register code_obj = r8;
__ LoadTaggedPointerField(
__ LoadTaggedField(
code_obj,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
__ LoadTaggedField(
code_obj,
FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));

@ -155,11 +155,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,

// Load the feedback vector.
Register feedback_vector = r4;
__ LoadTaggedPointerField(
feedback_vector,
__ LoadTaggedField(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));

Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
@ -320,7 +319,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,

// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadTaggedPointerField(
__ LoadTaggedField(
r3,
FieldMemOperand(
r2, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));
@ -472,7 +471,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[4*kSystemPointerSize]: context
// -----------------------------------

__ LoadTaggedPointerField(
__ LoadTaggedField(
r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadU32(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(r6);
@ -633,10 +632,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ AssertGeneratorObject(r3);

// Load suspended function and context.
__ LoadTaggedPointerField(
r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedPointerField(cp,
FieldMemOperand(r6, JSFunction::kContextOffset));
__ LoadTaggedField(r6,
FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedField(cp, FieldMemOperand(r6, JSFunction::kContextOffset));

// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@ -677,12 +675,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -----------------------------------

// Copy the function arguments from the generator object's register file.
__ LoadTaggedPointerField(
__ LoadTaggedField(
r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
__ LoadU16(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
__ SubS64(r5, r5, Operand(kJSArgcReceiverSlots));
__ LoadTaggedPointerField(
__ LoadTaggedField(
r4,
FieldMemOperand(r3, JSGeneratorObject::kParametersAndRegistersOffset));
{
@ -692,24 +690,24 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ blt(&done_loop);
__ ShiftLeftU64(r1, r5, Operand(kTaggedSizeLog2));
__ la(scratch, MemOperand(r4, r1));
__ LoadAnyTaggedField(scratch,
__ LoadTaggedField(scratch,
FieldMemOperand(scratch, FixedArray::kHeaderSize));
__ Push(scratch);
__ b(&loop);
__ bind(&done_loop);

// Push receiver.
__ LoadAnyTaggedField(
scratch, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
__ LoadTaggedField(scratch,
FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
__ Push(scratch);
}

// Underlying function needs to have bytecode available.
if (v8_flags.debug_code) {
Label is_baseline;
__ LoadTaggedPointerField(
__ LoadTaggedField(
r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
__ LoadTaggedField(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecodeOrBaseline(masm, r5, ip, &is_baseline);
__ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
@ -719,7 +717,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {

// Resume (Ignition/TurboFan) generator object.
{
__ LoadTaggedPointerField(
__ LoadTaggedField(
r2, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
__ LoadS16(
r2,
@ -730,7 +728,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mov(r5, r3);
__ mov(r3, r6);
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
__ LoadTaggedPointerField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
__ LoadTaggedField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
__ JumpCodeObject(r4);
}

@ -742,8 +740,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(r3);
__ LoadTaggedPointerField(
r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedField(r6,
FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
}
__ b(&stepping_prepared);

@ -753,8 +751,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(r3);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(r3);
__ LoadTaggedPointerField(
r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedField(r6,
FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
}
__ b(&stepping_prepared);

@ -1245,11 +1243,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
BaselineOutOfLinePrologueDescriptor::kClosure);
// Load the feedback vector from the closure.
Register feedback_vector = ip;
__ LoadTaggedPointerField(
feedback_vector,
__ LoadTaggedField(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ AssertFeedbackVector(feedback_vector, r1);

// Check for a tiering state.
@ -1406,10 +1403,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(

// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ LoadTaggedPointerField(
__ LoadTaggedField(
r6, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Load original bytecode array or the debug copy.
__ LoadTaggedPointerField(
__ LoadTaggedField(
kInterpreterBytecodeArrayRegister,
FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));

@ -1425,17 +1422,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ bne(&compile_lazy);

// Load the feedback vector from the closure.
__ LoadTaggedPointerField(
feedback_vector,
__ LoadTaggedField(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));

Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, setup the stack frame.
__ LoadTaggedPointerField(
r6, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ LoadTaggedField(r6,
FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ LoadU16(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
__ CmpS64(r6, Operand(FEEDBACK_VECTOR_TYPE));
__ bne(&push_stack_frame);
@ -1611,16 +1607,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ bind(&is_baseline);
{
// Load the feedback vector from the closure.
__ LoadTaggedPointerField(
__ LoadTaggedField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));

Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
__ LoadTaggedPointerField(
__ LoadTaggedField(
ip, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ LoadU16(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
__ CmpS32(ip, Operand(FEEDBACK_VECTOR_TYPE));
@ -1792,16 +1788,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
__ LoadU64(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ LoadTaggedPointerField(
__ LoadTaggedField(
r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
__ LoadTaggedField(
r4, FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
__ CompareObjectType(r4, kInterpreterDispatchTableRegister,
kInterpreterDispatchTableRegister,
INTERPRETER_DATA_TYPE);
__ bne(&builtin_trampoline);

__ LoadTaggedPointerField(
__ LoadTaggedField(
r4, FieldMemOperand(r4, InterpreterData::kInterpreterTrampolineOffset));
__ LoadCodeEntry(r4, r4);
__ b(&trampoline_loaded);
@ -2240,8 +2236,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Allow r4 to be a FixedArray, or a FixedDoubleArray if r6 == 0.
Label ok, fail;
__ AssertNotSmi(r4);
__ LoadTaggedPointerField(scratch,
FieldMemOperand(r4, HeapObject::kMapOffset));
__ LoadTaggedField(scratch, FieldMemOperand(r4, HeapObject::kMapOffset));
__ LoadS16(scratch,
FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ CmpS64(scratch, Operand(FIXED_ARRAY_TYPE));
@ -2277,7 +2272,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
__ mov(r1, r6);
__ bind(&loop);
__ LoadAnyTaggedField(scratch, MemOperand(r4, kTaggedSize), r0);
__ LoadTaggedField(scratch, MemOperand(r4, kTaggedSize), r0);
__ la(r4, MemOperand(r4, kTaggedSize));
__ CompareRoot(scratch, RootIndex::kTheHoleValue);
__ bne(&skip, Label::kNear);
@ -2312,8 +2307,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(r5, &new_target_not_constructor);
__ LoadTaggedPointerField(scratch,
FieldMemOperand(r5, HeapObject::kMapOffset));
__ LoadTaggedField(scratch, FieldMemOperand(r5, HeapObject::kMapOffset));
__ LoadU8(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ tmll(scratch, Operand(Map::Bits1::IsConstructorBit::kShift));
__ bne(&new_target_constructor);
@ -2397,14 +2391,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -----------------------------------
__ AssertCallableFunction(r3);

__ LoadTaggedPointerField(
__ LoadTaggedField(
r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));

// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
__ LoadTaggedPointerField(cp,
FieldMemOperand(r3, JSFunction::kContextOffset));
__ LoadTaggedField(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ LoadU32(r5, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
@ -2458,7 +2451,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(r2, r3);
__ SmiUntag(r2);
}
__ LoadTaggedPointerField(
__ LoadTaggedField(
r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
@ -2489,7 +2482,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {

// Load [[BoundArguments]] into r4 and length of that into r6.
Label no_bound_arguments;
__ LoadTaggedPointerField(
__ LoadTaggedField(
r4, FieldMemOperand(r3, JSBoundFunction::kBoundArgumentsOffset));
__ SmiUntagField(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
__ LoadAndTestP(r6, r6);
@ -2535,7 +2528,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&loop);
__ SubS64(r1, r6, Operand(1));
__ ShiftLeftU64(r1, r1, Operand(kTaggedSizeLog2));
__ LoadAnyTaggedField(scratch, MemOperand(r4, r1), r0);
__ LoadTaggedField(scratch, MemOperand(r4, r1), r0);
__ Push(scratch);
__ SubS64(r6, r6, Operand(1));
__ bgt(&loop);
@ -2559,7 +2552,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(r3);

// Patch the receiver to [[BoundThis]].
__ LoadAnyTaggedField(r5,
__ LoadTaggedField(r5,
FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
__ StoreReceiver(r5, r2, r1);

@ -2567,7 +2560,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
Generate_PushBoundArguments(masm);

// Call the [[BoundTargetFunction]] via the Call builtin.
__ LoadTaggedPointerField(
__ LoadTaggedField(
r3, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
@ -2667,7 +2660,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
Label call_generic_stub;

// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ LoadTaggedPointerField(
__ LoadTaggedField(
r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadU32(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
__ AndP(r6, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
@ -2698,12 +2691,12 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
Label skip;
__ CompareTagged(r3, r5);
__ bne(&skip);
__ LoadTaggedPointerField(
__ LoadTaggedField(
r5, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
__ bind(&skip);

// Construct the [[BoundTargetFunction]] via the Construct builtin.
__ LoadTaggedPointerField(
__ LoadTaggedField(
r3, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
@ -2727,8 +2720,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ JumpIfSmi(target, &non_constructor);

// Check if target has a [[Construct]] internal method.
__ LoadTaggedPointerField(map,
FieldMemOperand(target, HeapObject::kMapOffset));
__ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset));
{
Register flags = r4;
DCHECK(!AreAliased(argc, target, map, instance_type, flags));
@ -2811,13 +2803,12 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
Register scratch = r0;
Label allocate_vector, done;

__ LoadTaggedPointerField(
__ LoadTaggedField(
vector, FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kFeedbackVectorsOffset));
__ ShiftLeftU64(scratch, func_index, Operand(kTaggedSizeLog2));
__ AddS64(vector, vector, scratch);
__ LoadTaggedPointerField(vector,
FieldMemOperand(vector, FixedArray::kHeaderSize));
__ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize));
__ JumpIfSmi(vector, &allocate_vector);
__ bind(&done);
__ push(kWasmInstanceRegister);
@ -3504,16 +3495,16 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {

__ push(receiver);
// Push data from AccessorInfo.
__ LoadAnyTaggedField(
scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset), r1);
__ LoadTaggedField(scratch,
FieldMemOperand(callback, AccessorInfo::kDataOffset), r1);
__ push(scratch);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ Push(scratch, scratch);
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
__ Push(scratch, holder);
__ Push(Smi::zero()); // should_throw_on_error -> false
__ LoadTaggedPointerField(
scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset), r1);
__ LoadTaggedField(scratch,
FieldMemOperand(callback, AccessorInfo::kNameOffset), r1);
__ push(scratch);

// v8::PropertyCallbackInfo::args_ array and name handle.
@ -171,8 +171,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -----------------------------------

const TaggedRegister shared_function_info(rbx);
__ LoadTaggedPointerField(
shared_function_info,
__ LoadTaggedField(shared_function_info,
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movl(rbx,
FieldOperand(shared_function_info, SharedFunctionInfo::kFlagsOffset));
@ -701,7 +700,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
__ CmpInstanceType(scratch1, INTERPRETER_DATA_TYPE);
__ j(not_equal, &done, Label::kNear);

__ LoadTaggedPointerField(
__ LoadTaggedField(
sfi_data, FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));

__ bind(&done);
@ -729,9 +728,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r8 : no_reg;

// Load suspended function and context.
__ LoadTaggedPointerField(
rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
__ LoadTaggedField(rdi,
FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));

// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@ -768,12 +767,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -----------------------------------

// Copy the function arguments from the generator object's register file.
__ LoadTaggedPointerField(
rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedField(rcx,
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movzxwq(
rcx, FieldOperand(rcx, SharedFunctionInfo::kFormalParameterCountOffset));
__ decq(rcx); // Exclude receiver.
__ LoadTaggedPointerField(
__ LoadTaggedField(
rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset));

{
@ -781,24 +780,23 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&loop);
__ decq(rcx);
__ j(less, &done_loop, Label::kNear);
__ PushTaggedAnyField(
__ PushTaggedField(
FieldOperand(rbx, rcx, times_tagged_size, FixedArray::kHeaderSize),
decompr_scratch1);
__ jmp(&loop);
__ bind(&done_loop);

// Push the receiver.
__ PushTaggedPointerField(
FieldOperand(rdx, JSGeneratorObject::kReceiverOffset),
__ PushTaggedField(FieldOperand(rdx, JSGeneratorObject::kReceiverOffset),
decompr_scratch1);
}

// Underlying function needs to have bytecode available.
if (v8_flags.debug_code) {
Label is_baseline, ok;
__ LoadTaggedPointerField(
__ LoadTaggedField(
rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
__ LoadTaggedField(
rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecodeOrBaseline(masm, rcx, kScratchRegister,
&is_baseline);
@ -816,7 +814,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
__ PushReturnAddressFrom(rax);
__ LoadTaggedPointerField(
__ LoadTaggedField(
rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movzxwq(rax, FieldOperand(
rax, SharedFunctionInfo::kFormalParameterCountOffset));
@ -824,7 +822,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
__ LoadTaggedField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
__ JumpCodeObject(rcx);
}

@ -837,8 +835,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(rdx);
__ LoadTaggedPointerField(
rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedField(rdi,
FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
}
__ jmp(&stepping_prepared);

@ -848,8 +846,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(rdx);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(rdx);
__ LoadTaggedPointerField(
rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedField(rdi,
FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
}
__ jmp(&stepping_prepared);

@ -1019,11 +1017,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
const TaggedRegister shared_function_info(kScratchRegister);
__ LoadTaggedPointerField(
__ LoadTaggedField(
shared_function_info,
FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
kInterpreterBytecodeArrayRegister,
__ LoadTaggedField(kInterpreterBytecodeArrayRegister,
FieldOperand(shared_function_info,
SharedFunctionInfo::kFunctionDataOffset));

@ -1040,9 +1037,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(

// Load the feedback vector from the closure.
TaggedRegister feedback_cell(feedback_vector);
__ LoadTaggedPointerField(
feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(feedback_vector,
__ LoadTaggedField(feedback_cell,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedField(feedback_vector,
FieldOperand(feedback_cell, Cell::kValueOffset));

Label push_stack_frame;
@ -1220,9 +1217,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(
{
// Load the feedback vector from the closure.
TaggedRegister feedback_cell(feedback_vector);
__ LoadTaggedPointerField(
feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(feedback_vector,
__ LoadTaggedField(feedback_cell,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedField(feedback_vector,
FieldOperand(feedback_cell, Cell::kValueOffset));

Label install_baseline_code;
@ -1417,16 +1414,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// trampoline.
__ movq(rbx, Operand(rbp, StandardFrameConstants::kFunctionOffset));
const TaggedRegister shared_function_info(rbx);
__ LoadTaggedPointerField(
shared_function_info,
__ LoadTaggedField(shared_function_info,
FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
rbx, FieldOperand(shared_function_info,
__ LoadTaggedField(rbx,
FieldOperand(shared_function_info,
SharedFunctionInfo::kFunctionDataOffset));
__ CmpObjectType(rbx, INTERPRETER_DATA_TYPE, kScratchRegister);
__ j(not_equal, &builtin_trampoline, Label::kNear);

__ LoadTaggedPointerField(
__ LoadTaggedField(
rbx, FieldOperand(rbx, InterpreterData::kInterpreterTrampolineOffset));
__ LoadCodeEntry(rbx, rbx);
__ jmp(&trampoline_loaded, Label::kNear);
@ -1555,9 +1551,9 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
BaselineOutOfLinePrologueDescriptor::kClosure);
// Load the feedback vector from the closure.
TaggedRegister feedback_cell(feedback_vector);
__ LoadTaggedPointerField(
feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(feedback_vector,
__ LoadTaggedField(feedback_cell,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedField(feedback_vector,
FieldOperand(feedback_cell, Cell::kValueOffset));
__ AssertFeedbackVector(feedback_vector);

@ -2097,7 +2093,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ cmpl(current, num);
__ j(equal, &done, Label::kNear);
// Turn the hole into undefined as we go.
__ LoadAnyTaggedField(value, FieldOperand(src, current, times_tagged_size,
__ LoadTaggedField(value, FieldOperand(src, current, times_tagged_size,
FixedArray::kHeaderSize));
__ CompareRoot(value, RootIndex::kTheHoleValue);
__ j(not_equal, &push, Label::kNear);
@ -2213,8 +2209,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
StackArgumentsAccessor args(rax);
__ AssertCallableFunction(rdi);

__ LoadTaggedPointerField(
rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedField(rdx,
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
// ----------- S t a t e -------------
// -- rax : the number of arguments
// -- rdx : the shared function info.
@ -2224,7 +2220,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
__ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
__ LoadTaggedField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ testl(FieldOperand(rdx, SharedFunctionInfo::kFlagsOffset),
@ -2281,7 +2277,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(rax);
__ SmiUntagUnsigned(rax);
}
__ LoadTaggedPointerField(
__ LoadTaggedField(
rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
@ -2312,8 +2308,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {

// Load [[BoundArguments]] into rcx and length of that into rbx.
Label no_bound_arguments;
__ LoadTaggedPointerField(
rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
__ LoadTaggedField(rcx,
FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
__ SmiUntagFieldUnsigned(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ testl(rbx, rbx);
__ j(zero, &no_bound_arguments);
@ -2354,7 +2350,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Push [[BoundArguments]] to the stack.
{
Label loop;
__ LoadTaggedPointerField(
__ LoadTaggedField(
rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
__ SmiUntagFieldUnsigned(rbx,
FieldOperand(rcx, FixedArray::kLengthOffset));
@ -2364,8 +2360,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// offset in order to be able to move decl(rbx) right before the loop
// condition. This is necessary in order to avoid flags corruption by
// pointer decompression code.
__ LoadAnyTaggedField(
r12, FieldOperand(rcx, rbx, times_tagged_size,
__ LoadTaggedField(r12,
FieldOperand(rcx, rbx, times_tagged_size,
FixedArray::kHeaderSize - kTaggedSize));
__ Push(r12);
__ decl(rbx);
@ -2391,15 +2387,14 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {

// Patch the receiver to [[BoundThis]].
StackArgumentsAccessor args(rax);
__ LoadAnyTaggedField(rbx,
FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
__ LoadTaggedField(rbx, FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
__ movq(args.GetReceiverOperand(), rbx);

// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);

// Call the [[BoundTargetFunction]] via the Call builtin.
__ LoadTaggedPointerField(
__ LoadTaggedField(
rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
@ -2498,8 +2493,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {

// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
const TaggedRegister shared_function_info(rcx);
__ LoadTaggedPointerField(
shared_function_info,
__ LoadTaggedField(shared_function_info,
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testl(FieldOperand(shared_function_info, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
@ -2528,13 +2522,13 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
Label done;
__ cmpq(rdi, rdx);
__ j(not_equal, &done, Label::kNear);
__ LoadTaggedPointerField(
__ LoadTaggedField(
rdx, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
__ bind(&done);
}

// Construct the [[BoundTargetFunction]] via the Construct builtin.
__ LoadTaggedPointerField(
__ LoadTaggedField(
rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
@ -2677,7 +2671,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,

// Load deoptimization data from the code object.
const TaggedRegister deopt_data(rbx);
__ LoadTaggedPointerField(
__ LoadTaggedField(
deopt_data,
FieldOperand(
rax, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));
@ -2776,11 +2770,10 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
__ Push(rbp);
__ Move(rbp, rsp);
__ Push(Immediate(StackFrame::TypeToMarker(StackFrame::WASM)));
__ LoadTaggedPointerField(
vector, FieldOperand(kWasmInstanceRegister,
__ LoadTaggedField(vector,
FieldOperand(kWasmInstanceRegister,
WasmInstanceObject::kFeedbackVectorsOffset));
__ LoadTaggedPointerField(vector,
FieldOperand(vector, func_index, times_tagged_size,
__ LoadTaggedField(vector, FieldOperand(vector, func_index, times_tagged_size,
FixedArray::kHeaderSize));
Label allocate_vector, done;
__ JumpIfSmi(vector, &allocate_vector);
@ -2931,7 +2924,7 @@ void PrepareForBuiltinCall(MacroAssembler* masm, MemOperand GCScanSlotPlace,
__ pushq(function_data);
// We had to prepare the parameters for the Call: we have to put the context
// into rsi.
__ LoadAnyTaggedField(
__ LoadTaggedField(
rsi,
MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
WasmInstanceObject::kNativeContextOffset)));
@ -3012,7 +3005,7 @@ void AllocateSuspender(MacroAssembler* masm, Register function_data,
__ Move(GCScanSlotPlace, 2);
__ Push(wasm_instance);
__ Push(function_data);
__ LoadAnyTaggedField(
__ LoadTaggedField(
kContextRegister,
MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
WasmInstanceObject::kNativeContextOffset)));
@ -3052,7 +3045,7 @@ void ReloadParentContinuation(MacroAssembler* masm, Register wasm_instance,
wasm::JumpBuffer::Retired);

Register parent = tmp2;
__ LoadAnyTaggedField(
__ LoadTaggedField(
parent,
FieldOperand(active_continuation, WasmContinuationObject::kParentOffset));

@ -3083,7 +3076,7 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1,
__ StoreTaggedSignedField(
FieldOperand(suspender, WasmSuspenderObject::kStateOffset),
Smi::FromInt(WasmSuspenderObject::kInactive));
__ LoadAnyTaggedField(
__ LoadTaggedField(
suspender, FieldOperand(suspender, WasmSuspenderObject::kParentOffset));
__ CompareRoot(suspender, RootIndex::kUndefinedValue);
Label undefined;
@ -3111,19 +3104,19 @@ void LoadFunctionDataAndWasmInstance(MacroAssembler* masm,
Register wasm_instance) {
Register closure = function_data;
Register shared_function_info = closure;
__ LoadAnyTaggedField(
__ LoadTaggedField(
shared_function_info,
MemOperand(
closure,
wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()));
closure = no_reg;
__ LoadAnyTaggedField(
__ LoadTaggedField(
function_data,
MemOperand(shared_function_info,
SharedFunctionInfo::kFunctionDataOffset - kHeapObjectTag));
shared_function_info = no_reg;

__ LoadAnyTaggedField(
__ LoadTaggedField(
wasm_instance,
MemOperand(function_data,
WasmExportedFunctionData::kInstanceOffset - kHeapObjectTag));
@ -3224,7 +3217,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
Register suspender = rax; // Fixed.
__ movq(MemOperand(rbp, kSuspenderOffset), suspender);
Register target_continuation = rax;
__ LoadAnyTaggedField(
__ LoadTaggedField(
target_continuation,
FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset));
suspender = no_reg;
@ -3728,7 +3721,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {

Register function_entry = function_data;
Register scratch = r12;
__ LoadAnyTaggedField(
__ LoadTaggedField(
function_entry,
FieldOperand(function_data, WasmExportedFunctionData::kInternalOffset));
__ LoadExternalPointerField(
@ -4081,7 +4074,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
// live: [rax, rbx, rcx]

Register suspender_continuation = rdx;
__ LoadAnyTaggedField(
__ LoadTaggedField(
suspender_continuation,
FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset));
#ifdef DEBUG
@ -4102,12 +4095,12 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
// Update roots.
// -------------------------------------------
Register caller = rcx;
__ LoadAnyTaggedField(caller,
__ LoadTaggedField(caller,
FieldOperand(suspender_continuation,
WasmContinuationObject::kParentOffset));
__ movq(masm->RootAsOperand(RootIndex::kActiveContinuation), caller);
Register parent = rdx;
__ LoadAnyTaggedField(
__ LoadTaggedField(
parent, FieldOperand(suspender, WasmSuspenderObject::kParentOffset));
__ movq(masm->RootAsOperand(RootIndex::kActiveSuspender), parent);
parent = no_reg;
@ -4172,19 +4165,19 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
// Load suspender from closure.
// -------------------------------------------
Register sfi = closure;
__ LoadAnyTaggedField(
__ LoadTaggedField(
sfi,
MemOperand(
closure,
wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()));
Register function_data = sfi;
__ LoadAnyTaggedField(
__ LoadTaggedField(
function_data,
FieldOperand(sfi, SharedFunctionInfo::kFunctionDataOffset));
// The write barrier uses a fixed register for the host object (rdi). The next
// barrier is on the suspender, so load it in rdi directly.
Register suspender = rdi;
__ LoadAnyTaggedField(
__ LoadTaggedField(
suspender, FieldOperand(function_data, WasmResumeData::kSuspenderOffset));
// Check the suspender state.
Label suspender_is_suspended;
@ -4233,7 +4226,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
__ movq(masm->RootAsOperand(RootIndex::kActiveSuspender), suspender);

Register target_continuation = suspender;
__ LoadAnyTaggedField(
__ LoadTaggedField(
target_continuation,
FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset));
suspender = no_reg;
@ -4848,7 +4841,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
// Insert additional parameters into the stack frame above return address.
__ PopReturnAddressTo(scratch);
__ Push(receiver);
__ PushTaggedAnyField(FieldOperand(callback, AccessorInfo::kDataOffset),
__ PushTaggedField(FieldOperand(callback, AccessorInfo::kDataOffset),
decompr_scratch1);
__ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
__ Push(kScratchRegister); // return value
@ -4856,7 +4849,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ PushAddress(ExternalReference::isolate_address(masm->isolate()));
__ Push(holder);
__ Push(Smi::zero()); // should_throw_on_error -> false
__ PushTaggedPointerField(FieldOperand(callback, AccessorInfo::kNameOffset),
__ PushTaggedField(FieldOperand(callback, AccessorInfo::kNameOffset),
decompr_scratch1);
__ PushReturnAddressFrom(scratch);

@ -5129,11 +5122,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Get the InstructionStream object from the shared function info.
Register code_obj = rbx;
TaggedRegister shared_function_info(code_obj);
__ LoadTaggedPointerField(
__ LoadTaggedField(
shared_function_info,
FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
code_obj, FieldOperand(shared_function_info,
__ LoadTaggedField(code_obj,
FieldOperand(shared_function_info,
SharedFunctionInfo::kFunctionDataOffset));

// Check if we have baseline code. For OSR entry it is safe to assume we
@ -5166,9 +5159,9 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
Register feedback_vector = r11;

TaggedRegister feedback_cell(feedback_vector);
__ LoadTaggedPointerField(
feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(feedback_vector,
__ LoadTaggedField(feedback_cell,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedField(feedback_vector,
FieldOperand(feedback_cell, Cell::kValueOffset));

Label install_baseline_code;
@ -659,8 +659,8 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
Tagged_t compressed =
Assembler::target_compressed_address_at(pc_, constant_pool_);
DCHECK(!HAS_SMI_TAG(compressed));
Object obj(V8HeapCompressionScheme::DecompressTaggedPointer(cage_base,
compressed));
Object obj(
V8HeapCompressionScheme::DecompressTagged(cage_base, compressed));
// Embedding of compressed InstructionStream objects must not happen when
// external code space is enabled, because Codes must be used
// instead.
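The macro-assembler hunks that follow remove the duplicate decompression path itself. Per the change description, the two variants only ever differed on Smis: the any-tagged path sign-extended them, while Smi-corrupting loads allow the upper 32 bits of an uncompressed Smi to be undefined, so one base-plus-offset add now serves both cases. A standalone sketch of the old versus new arithmetic (illustrative constants, not V8 code):

#include <cstdint>

// Hypothetical 4GB-aligned cage base; its low 32 bits are zero.
constexpr uint64_t kCageBase = 0x200000000000;

// Old any-tagged path: Smis (low tag bit clear) were sign-extended so
// their upper 32 bits stayed meaningful.
uint64_t DecompressTaggedAnyOld(uint32_t value) {
  if ((value & 1) == 0) {  // Smi tag is 0, HeapObject tag is 1
    return static_cast<uint64_t>(
        static_cast<int64_t>(static_cast<int32_t>(value)));
  }
  return kCageBase + value;  // HeapObject: rebase into the cage
}

// New unified path: one add for everything. A decompressed Smi now carries
// cage-base bits in its upper half, which is fine once every Smi consumer
// reads only the low 32 bits.
uint64_t DecompressTaggedNew(uint32_t value) { return kCageBase + value; }

int main() {
  uint32_t smi = 42u << 1;  // a tagged Smi
  // Because the cage base is 4GB-aligned, both paths agree on the low
  // 32 bits, and that is all Smi users may rely on after this change.
  return (static_cast<uint32_t>(DecompressTaggedAnyOld(smi)) ==
          static_cast<uint32_t>(DecompressTaggedNew(smi)))
             ? 0
             : 1;
}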
@ -1522,8 +1522,7 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(
|
||||
|
||||
bind(&maybe_has_optimized_code);
|
||||
Register optimized_code_entry = x7;
|
||||
LoadAnyTaggedField(
|
||||
optimized_code_entry,
|
||||
LoadTaggedField(optimized_code_entry,
|
||||
FieldMemOperand(feedback_vector,
|
||||
FeedbackVector::kMaybeOptimizedCodeOffset));
|
||||
TailCallOptimizedCodeSlot(this, optimized_code_entry, x4);
|
||||
@ -1844,7 +1843,7 @@ void MacroAssembler::LoadTaggedRoot(Register destination, RootIndex index) {
|
||||
void MacroAssembler::LoadRoot(Register destination, RootIndex index) {
|
||||
ASM_CODE_COMMENT(this);
|
||||
// TODO(v8:13466, olivf): With static roots we could use
|
||||
// DecompressTaggedPointer here. However, currently all roots have addresses
|
||||
// DecompressTagged here. However, currently all roots have addresses
|
||||
// that are too large to fit into addition immediate operands. Evidence
|
||||
// suggests that the extra instruction for decompression costs us more than
|
||||
// the load.
|
||||
@ -2061,8 +2060,8 @@ void MacroAssembler::LoadFromConstantsTable(Register destination,
|
||||
ASM_CODE_COMMENT(this);
|
||||
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
|
||||
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
|
||||
LoadTaggedPointerField(
|
||||
destination, FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
|
||||
LoadTaggedField(destination,
|
||||
FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
|
||||
constant_index)));
|
||||
}
|
||||
|
||||
@ -2449,7 +2448,7 @@ void MacroAssembler::BailoutIfDeoptimized() {
|
||||
UseScratchRegisterScope temps(this);
|
||||
Register scratch = temps.AcquireX();
|
||||
int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
|
||||
LoadTaggedPointerField(scratch,
|
||||
LoadTaggedField(scratch,
|
||||
MemOperand(kJavaScriptCallCodeStartRegister, offset));
|
||||
Ldr(scratch.W(), FieldMemOperand(scratch, Code::kKindSpecificFlagsOffset));
|
||||
Label not_deoptimized;
|
||||
@ -2663,8 +2662,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
|
||||
// allow recompilation to take effect without changing any of the
|
||||
// call sites.
|
||||
Register code = kJavaScriptCallCodeStartRegister;
|
||||
LoadTaggedPointerField(code,
|
||||
FieldMemOperand(function, JSFunction::kCodeOffset));
|
||||
LoadTaggedField(code, FieldMemOperand(function, JSFunction::kCodeOffset));
|
||||
switch (type) {
|
||||
case InvokeType::kCall:
|
||||
CallCodeObject(code);
|
||||
@ -2715,12 +2713,11 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
|
||||
|
||||
Register expected_parameter_count = x2;
|
||||
|
||||
LoadTaggedPointerField(cp,
|
||||
FieldMemOperand(function, JSFunction::kContextOffset));
|
||||
LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
|
||||
// The number of arguments is stored as an int32_t, and -1 is a marker
|
||||
// (kDontAdaptArgumentsSentinel), so we need sign
|
||||
// extension to correctly handle it.
|
||||
LoadTaggedPointerField(
|
||||
LoadTaggedField(
|
||||
expected_parameter_count,
|
||||
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
|
||||
Ldrh(expected_parameter_count,
|
||||
@ -2744,8 +2741,7 @@ void MacroAssembler::InvokeFunction(Register function,
|
||||
DCHECK_EQ(function, x1);
|
||||
|
||||
// Set up the context.
|
||||
LoadTaggedPointerField(cp,
|
||||
FieldMemOperand(function, JSFunction::kContextOffset));
|
||||
LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
|
||||
|
||||
InvokeFunctionCode(function, no_reg, expected_parameter_count,
|
||||
actual_parameter_count, type);
|
||||
@ -3012,7 +3008,7 @@ void MacroAssembler::CompareObjectType(Register object, Register map,
|
||||
|
||||
void MacroAssembler::LoadMap(Register dst, Register object) {
|
||||
ASM_CODE_COMMENT(this);
|
||||
LoadTaggedPointerField(dst, FieldMemOperand(object, HeapObject::kMapOffset));
|
||||
LoadTaggedField(dst, FieldMemOperand(object, HeapObject::kMapOffset));
|
||||
}
|
||||
|
||||
// Sets condition flags based on comparison, and returns type in type_reg.
|
||||
@ -3086,19 +3082,10 @@ void MacroAssembler::JumpIfIsInRange(const Register& value,
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::LoadTaggedPointerField(const Register& destination,
|
||||
void MacroAssembler::LoadTaggedField(const Register& destination,
|
||||
const MemOperand& field_operand) {
|
||||
if (COMPRESS_POINTERS_BOOL) {
|
||||
DecompressTaggedPointer(destination, field_operand);
|
||||
} else {
|
||||
Ldr(destination, field_operand);
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::LoadAnyTaggedField(const Register& destination,
|
||||
const MemOperand& field_operand) {
|
||||
if (COMPRESS_POINTERS_BOOL) {
|
||||
DecompressAnyTagged(destination, field_operand);
|
||||
DecompressTagged(destination, field_operand);
|
||||
} else {
|
||||
Ldr(destination, field_operand);
|
||||
}
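
For reference, every LoadTaggedField implementation in this change follows the same branch structure as the one above. A minimal standalone C++ sketch (illustrative only; LoadTaggedFieldSketch and its parameters are not V8 names) of the two paths:

#include <cstdint>
#include <cstring>

uint64_t LoadTaggedFieldSketch(const uint8_t* field, uint64_t cage_base,
                               bool compress_pointers) {
  if (compress_pointers) {
    uint32_t compressed;
    std::memcpy(&compressed, field, sizeof(compressed));  // 32-bit load
    return cage_base + compressed;                        // DecompressTagged
  }
  uint64_t raw;
  std::memcpy(&raw, field, sizeof(raw));  // plain 64-bit load (Ldr)
  return raw;
}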
@ -3149,20 +3136,20 @@ void MacroAssembler::DecompressTaggedSigned(const Register& destination,
}
}

void MacroAssembler::DecompressTaggedPointer(const Register& destination,
void MacroAssembler::DecompressTagged(const Register& destination,
const MemOperand& field_operand) {
ASM_CODE_COMMENT(this);
Ldr(destination.W(), field_operand);
Add(destination, kPtrComprCageBaseRegister, destination);
}

void MacroAssembler::DecompressTaggedPointer(const Register& destination,
void MacroAssembler::DecompressTagged(const Register& destination,
const Register& source) {
ASM_CODE_COMMENT(this);
Add(destination, kPtrComprCageBaseRegister, Operand(source, UXTW));
}
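
Both DecompressTagged overloads above reduce to the same arithmetic: zero-extend the 32-bit compressed value and add the pointer-compression cage base. A minimal sketch, assuming a 64-bit Address and a 32-bit Tagged_t (not V8 code):

#include <cstdint>

// Mirrors Add(destination, kPtrComprCageBaseRegister, Operand(source, UXTW)):
// the compressed word is zero-extended (UXTW) and added to the cage base.
uint64_t DecompressTaggedSketch(uint64_t cage_base, uint32_t compressed) {
  return cage_base + static_cast<uint64_t>(compressed);
}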

void MacroAssembler::DecompressTaggedPointer(const Register& destination,
void MacroAssembler::DecompressTagged(const Register& destination,
Tagged_t immediate) {
ASM_CODE_COMMENT(this);
if (IsImmAddSub(immediate)) {
@ -3178,13 +3165,6 @@ void MacroAssembler::DecompressTaggedPointer(const Register& destination,
}
}

void MacroAssembler::DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand) {
ASM_CODE_COMMENT(this);
Ldr(destination.W(), field_operand);
Add(destination, kPtrComprCageBaseRegister, destination);
}

void MacroAssembler::AtomicDecompressTaggedSigned(const Register& destination,
const Register& base,
const Register& index,
@ -3199,17 +3179,7 @@ void MacroAssembler::AtomicDecompressTaggedSigned(const Register& destination,
}
}

void MacroAssembler::AtomicDecompressTaggedPointer(const Register& destination,
const Register& base,
const Register& index,
const Register& temp) {
ASM_CODE_COMMENT(this);
Add(temp, base, index);
Ldar(destination.W(), temp);
Add(destination, kPtrComprCageBaseRegister, destination);
}
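
The atomic variants differ from the plain loads only in the load instruction: an acquire load (Ldar) of the 32-bit word, followed by the same rebasing add. A hedged standalone sketch of that ordering contract (not V8 code):

#include <atomic>
#include <cstdint>

uint64_t AtomicDecompressTaggedSketch(uint64_t cage_base,
                                      const std::atomic<uint32_t>* slot) {
  // Acquire ordering corresponds to Ldar; the add is ordinary arithmetic.
  uint32_t compressed = slot->load(std::memory_order_acquire);
  return cage_base + compressed;
}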

void MacroAssembler::AtomicDecompressAnyTagged(const Register& destination,
void MacroAssembler::AtomicDecompressTagged(const Register& destination,
const Register& base,
const Register& index,
const Register& temp) {
@ -3465,7 +3435,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
Register temp = temps.AcquireX();
DCHECK(!AreAliased(object, value, temp));
Add(temp, object, offset);
LoadTaggedPointerField(temp, MemOperand(temp));
LoadTaggedField(temp, MemOperand(temp));
Cmp(temp, value);
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
@ -3572,10 +3542,10 @@ void MacroAssembler::Abort(AbortReason reason) {

void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadMap(dst, cp);
LoadTaggedPointerField(
LoadTaggedField(
dst, FieldMemOperand(
dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
LoadTaggedField(dst, MemOperand(dst, Context::SlotOffset(index)));
}

void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
@ -3584,7 +3554,7 @@ void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
Label* on_result,
Label::Distance) {
Label fallthrough, clear_slot;
LoadTaggedPointerField(
LoadTaggedField(
scratch_and_result,
FieldMemOperand(feedback_vector,
FeedbackVector::OffsetOfElementAt(slot.ToInt())));

@ -1400,13 +1400,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
// ---------------------------------------------------------------------------
// Pointer compression Support

// Loads a field containing a HeapObject and decompresses it if pointer
// compression is enabled.
void LoadTaggedPointerField(const Register& destination,
const MemOperand& field_operand);

// Loads a field containing any tagged value and decompresses it if necessary.
void LoadAnyTaggedField(const Register& destination,
void LoadTaggedField(const Register& destination,
const MemOperand& field_operand);

// Loads a field containing a tagged signed value and decompresses it if
@ -1432,24 +1427,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {

void DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand);
void DecompressTaggedPointer(const Register& destination,
const MemOperand& field_operand);
void DecompressTaggedPointer(const Register& destination,
const Register& source);
void DecompressTaggedPointer(const Register& destination, Tagged_t immediate);
void DecompressAnyTagged(const Register& destination,
void DecompressTagged(const Register& destination,
const MemOperand& field_operand);
void DecompressTagged(const Register& destination, const Register& source);
void DecompressTagged(const Register& destination, Tagged_t immediate);

void AtomicDecompressTaggedSigned(const Register& destination,
const Register& base, const Register& index,
const Register& temp);
void AtomicDecompressTaggedPointer(const Register& destination,
const Register& base,
const Register& index,
const Register& temp);
void AtomicDecompressAnyTagged(const Register& destination,
const Register& base, const Register& index,
const Register& temp);
void AtomicDecompressTagged(const Register& destination, const Register& base,
const Register& index, const Register& temp);

// Restore FP and LR from the values stored in the current frame. This will
// authenticate the LR when pointer authentication is enabled.

@ -148,7 +148,7 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc,
HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
if (IsCompressedEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny(
return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTagged(
cage_base,
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {

@ -120,10 +120,9 @@ void MacroAssembler::LoadFromConstantsTable(Register destination,

DCHECK_NE(destination, r0);
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
LoadTaggedPointerField(
destination,
FieldMemOperand(destination,
FixedArray::OffsetOfElementAt(constant_index)),
LoadTaggedField(destination,
FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
constant_index)),
r0);
}

@ -624,28 +623,18 @@ void MacroAssembler::LoadRoot(Register destination, RootIndex index,
Condition cond) {
DCHECK(cond == al);
if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) {
DecompressTaggedPointer(destination, ReadOnlyRootPtr(index));
DecompressTagged(destination, ReadOnlyRootPtr(index));
return;
}
LoadU64(destination,
MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
}

void MacroAssembler::LoadTaggedPointerField(const Register& destination,
void MacroAssembler::LoadTaggedField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch) {
if (COMPRESS_POINTERS_BOOL) {
DecompressTaggedPointer(destination, field_operand);
} else {
LoadU64(destination, field_operand, scratch);
}
}

void MacroAssembler::LoadAnyTaggedField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch) {
if (COMPRESS_POINTERS_BOOL) {
DecompressAnyTagged(destination, field_operand);
DecompressTagged(destination, field_operand);
} else {
LoadU64(destination, field_operand, scratch);
}
@ -688,45 +677,28 @@ void MacroAssembler::DecompressTaggedSigned(Register destination,
RecordComment("]");
}

void MacroAssembler::DecompressTaggedPointer(Register destination,
Register source) {
RecordComment("[ DecompressTaggedPointer");
void MacroAssembler::DecompressTagged(Register destination, Register source) {
RecordComment("[ DecompressTagged");
ZeroExtWord32(destination, source);
add(destination, destination, kPtrComprCageBaseRegister);
RecordComment("]");
}

void MacroAssembler::DecompressTaggedPointer(Register destination,
void MacroAssembler::DecompressTagged(Register destination,
MemOperand field_operand) {
RecordComment("[ DecompressTaggedPointer");
RecordComment("[ DecompressTagged");
LoadU32(destination, field_operand, r0);
add(destination, destination, kPtrComprCageBaseRegister);
RecordComment("]");
}

void MacroAssembler::DecompressTaggedPointer(const Register& destination,
void MacroAssembler::DecompressTagged(const Register& destination,
Tagged_t immediate) {
ASM_CODE_COMMENT(this);
AddS64(destination, kPtrComprCageBaseRegister,
Operand(immediate, RelocInfo::Mode::NO_INFO));
}

void MacroAssembler::DecompressAnyTagged(Register destination,
MemOperand field_operand) {
RecordComment("[ DecompressAnyTagged");
LoadU32(destination, field_operand, r0);
add(destination, destination, kPtrComprCageBaseRegister);
RecordComment("]");
}

void MacroAssembler::DecompressAnyTagged(Register destination,
Register source) {
RecordComment("[ DecompressAnyTagged");
ZeroExtWord32(destination, source);
add(destination, destination, kPtrComprCageBaseRegister);
RecordComment("]");
}

void MacroAssembler::LoadTaggedSignedField(Register destination,
MemOperand field_operand,
Register scratch) {
@ -861,7 +833,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
SaveFPRegsMode fp_mode, SmiCheck smi_check) {
DCHECK(!AreAliased(object, value, slot_address));
if (v8_flags.debug_code) {
LoadTaggedPointerField(r0, MemOperand(slot_address));
LoadTaggedField(r0, MemOperand(slot_address));
CmpS64(r0, value);
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
@ -1645,8 +1617,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
LoadTaggedPointerField(
code, FieldMemOperand(function, JSFunction::kCodeOffset), r0);
LoadTaggedField(code, FieldMemOperand(function, JSFunction::kCodeOffset), r0);
switch (type) {
case InvokeType::kCall:
CallCodeObject(code);
@ -1673,10 +1644,9 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
Register expected_reg = r5;
Register temp_reg = r7;

LoadTaggedPointerField(
LoadTaggedField(
temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset),
r0);
LoadTaggedField(cp, FieldMemOperand(r4, JSFunction::kContextOffset), r0);
LoadU16(expected_reg,
FieldMemOperand(temp_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
@ -1696,8 +1666,7 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK_EQ(function, r4);

// Get the function and setup the context.
LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset),
r0);
LoadTaggedField(cp, FieldMemOperand(r4, JSFunction::kContextOffset), r0);

InvokeFunctionCode(r4, no_reg, expected_parameter_count,
actual_parameter_count, type);
@ -2163,7 +2132,7 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(

bind(&maybe_has_optimized_code);
Register optimized_code_entry = flags;
LoadAnyTaggedField(optimized_code_entry,
LoadTaggedField(optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kMaybeOptimizedCodeOffset),
r0);
@ -2307,17 +2276,17 @@ void MacroAssembler::Abort(AbortReason reason) {
}

void MacroAssembler::LoadMap(Register destination, Register object) {
LoadTaggedPointerField(destination,
FieldMemOperand(object, HeapObject::kMapOffset), r0);
LoadTaggedField(destination, FieldMemOperand(object, HeapObject::kMapOffset),
r0);
}

void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadMap(dst, cp);
LoadTaggedPointerField(
LoadTaggedField(
dst,
FieldMemOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset),
r0);
LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)), r0);
LoadTaggedField(dst, MemOperand(dst, Context::SlotOffset(index)), r0);
}

#ifdef V8_ENABLE_DEBUG_CODE

@ -1010,19 +1010,13 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
#endif
}

// Loads a field containing a HeapObject and decompresses it if pointer
// compression is enabled.
void LoadTaggedPointerField(const Register& destination,
// Loads a field containing any tagged value and decompresses it if necessary.
void LoadTaggedField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch = no_reg);
void LoadTaggedSignedField(Register destination, MemOperand field_operand,
Register scratch);

// Loads a field containing any tagged value and decompresses it if necessary.
void LoadAnyTaggedField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch = no_reg);

// Compresses and stores tagged value to given on-heap location.
void StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand,
@ -1030,11 +1024,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {

void DecompressTaggedSigned(Register destination, MemOperand field_operand);
void DecompressTaggedSigned(Register destination, Register src);
void DecompressTaggedPointer(Register destination, MemOperand field_operand);
void DecompressTaggedPointer(Register destination, Register source);
void DecompressTaggedPointer(const Register& destination, Tagged_t immediate);
void DecompressAnyTagged(Register destination, MemOperand field_operand);
void DecompressAnyTagged(Register destination, Register source);
void DecompressTagged(Register destination, MemOperand field_operand);
void DecompressTagged(Register destination, Register source);
void DecompressTagged(const Register& destination, Tagged_t immediate);

void LoadF64(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);

@ -162,7 +162,7 @@ void Assembler::deserialization_set_target_internal_reference_at(
HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
if (IsCompressedEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny(
return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTagged(
cage_base,
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {

@ -238,8 +238,7 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(

bind(&maybe_has_optimized_code);
Register optimized_code_entry = flags;
LoadAnyTaggedField(
optimized_code_entry,
LoadTaggedField(optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(this, optimized_code_entry, temps.Acquire(),
@ -413,7 +412,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
Register temp = temps.Acquire();
DCHECK(!AreAliased(object, value, temp));
AddWord(temp, object, offset);
LoadTaggedPointerField(temp, MemOperand(temp));
LoadTaggedField(temp, MemOperand(temp));
Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, temp,
Operand(value));
}
@ -4216,8 +4215,8 @@ void MacroAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
LoadTaggedPointerField(
destination, FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
LoadTaggedField(destination,
FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
constant_index)));
}

@ -4916,8 +4915,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
LoadTaggedPointerField(code,
FieldMemOperand(function, JSFunction::kCodeOffset));
LoadTaggedField(code, FieldMemOperand(function, JSFunction::kCodeOffset));
switch (type) {
case InvokeType::kCall:
CallCodeObject(code);
@ -4944,11 +4942,10 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
{
UseScratchRegisterScope temps(this);
Register temp_reg = temps.Acquire();
LoadTaggedPointerField(
LoadTaggedField(
temp_reg,
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
LoadTaggedPointerField(
cp, FieldMemOperand(function, JSFunction::kContextOffset));
LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
// The argument count is stored as uint16_t
Lhu(expected_parameter_count,
FieldMemOperand(temp_reg,
@ -4969,7 +4966,7 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK_EQ(function, a1);

// Get the function and setup the context.
LoadTaggedPointerField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
LoadTaggedField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));

InvokeFunctionCode(a1, no_reg, expected_parameter_count,
actual_parameter_count, type);
@ -5498,17 +5495,16 @@ void MacroAssembler::Abort(AbortReason reason) {

void MacroAssembler::LoadMap(Register destination, Register object) {
ASM_CODE_COMMENT(this);
LoadTaggedPointerField(destination,
FieldMemOperand(object, HeapObject::kMapOffset));
LoadTaggedField(destination, FieldMemOperand(object, HeapObject::kMapOffset));
}

void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
ASM_CODE_COMMENT(this);
LoadMap(dst, cp);
LoadTaggedPointerField(
LoadTaggedField(
dst, FieldMemOperand(
dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
LoadTaggedField(dst, MemOperand(dst, Context::SlotOffset(index)));
}

void MacroAssembler::StubPrologue(StackFrame::Type type) {
@ -6186,19 +6182,10 @@ void MacroAssembler::JumpCodeObject(Register code, JumpMode jump_mode) {
}

#if V8_TARGET_ARCH_RISCV64
void MacroAssembler::LoadTaggedPointerField(const Register& destination,
void MacroAssembler::LoadTaggedField(const Register& destination,
const MemOperand& field_operand) {
if (COMPRESS_POINTERS_BOOL) {
DecompressTaggedPointer(destination, field_operand);
} else {
Ld(destination, field_operand);
}
}

void MacroAssembler::LoadAnyTaggedField(const Register& destination,
const MemOperand& field_operand) {
if (COMPRESS_POINTERS_BOOL) {
DecompressAnyTagged(destination, field_operand);
DecompressTagged(destination, field_operand);
} else {
Ld(destination, field_operand);
}
@ -6237,26 +6224,19 @@ void MacroAssembler::DecompressTaggedSigned(const Register& destination,
}
}

void MacroAssembler::DecompressTaggedPointer(const Register& destination,
void MacroAssembler::DecompressTagged(const Register& destination,
const MemOperand& field_operand) {
ASM_CODE_COMMENT(this);
Lwu(destination, field_operand);
AddWord(destination, kPtrComprCageBaseRegister, destination);
}

void MacroAssembler::DecompressTaggedPointer(const Register& destination,
void MacroAssembler::DecompressTagged(const Register& destination,
const Register& source) {
ASM_CODE_COMMENT(this);
And(destination, source, Operand(0xFFFFFFFF));
AddWord(destination, kPtrComprCageBaseRegister, Operand(destination));
}

void MacroAssembler::DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand) {
ASM_CODE_COMMENT(this);
Lwu(destination, field_operand);
AddWord(destination, kPtrComprCageBaseRegister, destination);
}
#endif
void MacroAssembler::DropArguments(Register count, ArgumentsCountType type,
ArgumentsCountMode mode, Register scratch) {

@ -1072,13 +1072,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
// ---------------------------------------------------------------------------
// Pointer compression Support

// Loads a field containing a HeapObject and decompresses it if pointer
// compression is enabled.
void LoadTaggedPointerField(const Register& destination,
const MemOperand& field_operand);

// Loads a field containing any tagged value and decompresses it if necessary.
void LoadAnyTaggedField(const Register& destination,
void LoadTaggedField(const Register& destination,
const MemOperand& field_operand);

// Loads a field containing a tagged signed value and decompresses it if
@ -1095,12 +1090,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {

void DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand);
void DecompressTaggedPointer(const Register& destination,
const MemOperand& field_operand);
void DecompressTaggedPointer(const Register& destination,
const Register& source);
void DecompressAnyTagged(const Register& destination,
void DecompressTagged(const Register& destination,
const MemOperand& field_operand);
void DecompressTagged(const Register& destination, const Register& source);
void CmpTagged(const Register& rd, const Register& rs1, const Register& rs2) {
if (COMPRESS_POINTERS_BOOL) {
Sub32(rd, rs1, rs2);
@ -1113,11 +1105,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
// Pointer compression Support
// rv32 don't support Pointer compression. Defines these functions for
// simplify builtins.
inline void LoadTaggedPointerField(const Register& destination,
const MemOperand& field_operand) {
Lw(destination, field_operand);
}
inline void LoadAnyTaggedField(const Register& destination,
inline void LoadTaggedField(const Register& destination,
const MemOperand& field_operand) {
Lw(destination, field_operand);
}

@ -142,7 +142,7 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
if (IsCompressedEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny(
return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTagged(
cage_base,
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {

@ -343,10 +343,9 @@ void MacroAssembler::LoadFromConstantsTable(Register destination,
CHECK(is_uint19(offset));
DCHECK_NE(destination, r0);
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
LoadTaggedPointerField(
destination,
FieldMemOperand(destination,
FixedArray::OffsetOfElementAt(constant_index)),
LoadTaggedField(destination,
FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
constant_index)),
r1);
}

@ -862,28 +861,18 @@ void MacroAssembler::LoadTaggedRoot(Register destination, RootIndex index) {
void MacroAssembler::LoadRoot(Register destination, RootIndex index,
Condition) {
if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) {
DecompressTaggedPointer(destination, ReadOnlyRootPtr(index));
DecompressTagged(destination, ReadOnlyRootPtr(index));
return;
}
LoadU64(destination,
MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
}

void MacroAssembler::LoadTaggedPointerField(const Register& destination,
void MacroAssembler::LoadTaggedField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch) {
if (COMPRESS_POINTERS_BOOL) {
DecompressTaggedPointer(destination, field_operand);
} else {
LoadU64(destination, field_operand, scratch);
}
}

void MacroAssembler::LoadAnyTaggedField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch) {
if (COMPRESS_POINTERS_BOOL) {
DecompressAnyTagged(destination, field_operand);
DecompressTagged(destination, field_operand);
} else {
LoadU64(destination, field_operand, scratch);
}
@ -928,45 +917,28 @@ void MacroAssembler::DecompressTaggedSigned(Register destination,
RecordComment("]");
}

void MacroAssembler::DecompressTaggedPointer(Register destination,
Register source) {
RecordComment("[ DecompressTaggedPointer");
void MacroAssembler::DecompressTagged(Register destination, Register source) {
RecordComment("[ DecompressTagged");
llgfr(destination, source);
agr(destination, kRootRegister);
RecordComment("]");
}

void MacroAssembler::DecompressTaggedPointer(Register destination,
void MacroAssembler::DecompressTagged(Register destination,
MemOperand field_operand) {
RecordComment("[ DecompressTaggedPointer");
RecordComment("[ DecompressTagged");
llgf(destination, field_operand);
agr(destination, kRootRegister);
RecordComment("]");
}

void MacroAssembler::DecompressTaggedPointer(const Register& destination,
void MacroAssembler::DecompressTagged(const Register& destination,
Tagged_t immediate) {
ASM_CODE_COMMENT(this);
mov(destination, Operand(immediate, RelocInfo::NO_INFO));
agr(destination, kRootRegister);
}

void MacroAssembler::DecompressAnyTagged(Register destination,
MemOperand field_operand) {
RecordComment("[ DecompressAnyTagged");
llgf(destination, field_operand);
agr(destination, kRootRegister);
RecordComment("]");
}

void MacroAssembler::DecompressAnyTagged(Register destination,
Register source) {
RecordComment("[ DecompressAnyTagged");
llgfr(destination, source);
agr(destination, kRootRegister);
RecordComment("]");
}

void MacroAssembler::LoadTaggedSignedField(Register destination,
MemOperand field_operand) {
if (COMPRESS_POINTERS_BOOL) {
@ -1099,7 +1071,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
SaveFPRegsMode fp_mode, SmiCheck smi_check) {
DCHECK(!AreAliased(object, slot_address, value));
if (v8_flags.debug_code) {
LoadTaggedPointerField(r0, MemOperand(slot_address));
LoadTaggedField(r0, MemOperand(slot_address));
CmpS64(value, r0);
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
@ -1827,8 +1799,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
LoadTaggedPointerField(code,
FieldMemOperand(function, JSFunction::kCodeOffset));
LoadTaggedField(code, FieldMemOperand(function, JSFunction::kCodeOffset));
switch (type) {
case InvokeType::kCall:
CallCodeObject(code);
@ -1853,9 +1824,9 @@ void MacroAssembler::InvokeFunctionWithNewTarget(

Register expected_reg = r4;
Register temp_reg = r6;
LoadTaggedPointerField(cp, FieldMemOperand(fun, JSFunction::kContextOffset));
LoadTaggedPointerField(
temp_reg, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
LoadTaggedField(cp, FieldMemOperand(fun, JSFunction::kContextOffset));
LoadTaggedField(temp_reg,
FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
LoadU16(
expected_reg,
FieldMemOperand(temp_reg,
@ -1876,8 +1847,7 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK_EQ(function, r3);

// Get the function and setup the context.
LoadTaggedPointerField(cp,
FieldMemOperand(function, JSFunction::kContextOffset));
LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));

InvokeFunctionCode(r3, no_reg, expected_parameter_count,
actual_parameter_count, type);
@ -2158,8 +2128,7 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(

bind(&maybe_has_optimized_code);
Register optimized_code_entry = flags;
LoadAnyTaggedField(
optimized_code_entry,
LoadTaggedField(optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(this, optimized_code_entry, r8);
@ -2301,16 +2270,15 @@ void MacroAssembler::Abort(AbortReason reason) {
}

void MacroAssembler::LoadMap(Register destination, Register object) {
LoadTaggedPointerField(destination,
FieldMemOperand(object, HeapObject::kMapOffset));
LoadTaggedField(destination, FieldMemOperand(object, HeapObject::kMapOffset));
}

void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadMap(dst, cp);
LoadTaggedPointerField(
LoadTaggedField(
dst, FieldMemOperand(
dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
LoadTaggedField(dst, MemOperand(dst, Context::SlotOffset(index)));
}

#ifdef V8_ENABLE_DEBUG_CODE

@ -1464,18 +1464,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
#endif
}

// Loads a field containing a HeapObject and decompresses it if pointer
// compression is enabled.
void LoadTaggedPointerField(const Register& destination,
// Loads a field containing any tagged value and decompresses it if necessary.
void LoadTaggedField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch = no_reg);
void LoadTaggedSignedField(Register destination, MemOperand field_operand);

// Loads a field containing any tagged value and decompresses it if necessary.
void LoadAnyTaggedField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch = no_reg);

// Loads a field containing smi value and untags it.
void SmiUntagField(Register dst, const MemOperand& src);

@ -1486,11 +1480,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {

void DecompressTaggedSigned(Register destination, MemOperand field_operand);
void DecompressTaggedSigned(Register destination, Register src);
void DecompressTaggedPointer(Register destination, MemOperand field_operand);
void DecompressTaggedPointer(Register destination, Register source);
void DecompressTaggedPointer(const Register& destination, Tagged_t immediate);
void DecompressAnyTagged(Register destination, MemOperand field_operand);
void DecompressAnyTagged(Register destination, Register source);
void DecompressTagged(Register destination, MemOperand field_operand);
void DecompressTagged(Register destination, Register source);
void DecompressTagged(const Register& destination, Tagged_t immediate);

// CountLeadingZeros will corrupt the scratch register pair (eg. r0:r1)
void CountLeadingZerosU32(Register dst, Register src,

@ -283,8 +283,8 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
if (IsCompressedEmbeddedObject(rmode_)) {
Tagged_t compressed = ReadUnalignedValue<Tagged_t>(pc_);
DCHECK(!HAS_SMI_TAG(compressed));
Object obj(V8HeapCompressionScheme::DecompressTaggedPointer(cage_base,
compressed));
Object obj(
V8HeapCompressionScheme::DecompressTagged(cage_base, compressed));
// Embedding of compressed InstructionStream objects must not happen when
// external code space is enabled, because Codes must be used
// instead.
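
As a reading aid, the compressed-embedded-object path above boils down to an unaligned 32-bit read from the instruction stream followed by decompression. A standalone sketch (illustrative names, not V8 code):

#include <cstdint>
#include <cstring>

uint64_t TargetObjectSketch(uint64_t cage_base, const uint8_t* pc) {
  uint32_t compressed;
  std::memcpy(&compressed, pc, sizeof(compressed));  // ReadUnalignedValue
  return cage_base + compressed;                     // DecompressTagged
}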

@ -85,7 +85,7 @@ void MacroAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
LoadTaggedPointerField(
LoadTaggedField(
destination,
FieldOperand(destination, FixedArray::OffsetOfElementAt(constant_index)));
}
@ -174,7 +174,7 @@ void MacroAssembler::LoadTaggedRoot(Register destination, RootIndex index) {

void MacroAssembler::LoadRoot(Register destination, RootIndex index) {
if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) {
DecompressTaggedPointer(destination, ReadOnlyRootPtr(index));
DecompressTagged(destination, ReadOnlyRootPtr(index));
return;
}
DCHECK(root_array_available_);
@ -220,23 +220,22 @@ void MacroAssembler::CompareRoot(Operand with, RootIndex index) {
}

void MacroAssembler::LoadMap(Register destination, Register object) {
LoadTaggedPointerField(destination,
FieldOperand(object, HeapObject::kMapOffset));
LoadTaggedField(destination, FieldOperand(object, HeapObject::kMapOffset));
#ifdef V8_MAP_PACKING
UnpackMapWord(destination);
#endif
}

void MacroAssembler::LoadTaggedPointerField(Register destination,
void MacroAssembler::LoadTaggedField(Register destination,
Operand field_operand) {
if (COMPRESS_POINTERS_BOOL) {
DecompressTaggedPointer(destination, field_operand);
DecompressTagged(destination, field_operand);
} else {
mov_tagged(destination, field_operand);
}
}

void MacroAssembler::LoadTaggedPointerField(TaggedRegister destination,
void MacroAssembler::LoadTaggedField(TaggedRegister destination,
Operand field_operand) {
if (COMPRESS_POINTERS_BOOL) {
movl(destination.reg(), field_operand);
@ -264,40 +263,10 @@ void MacroAssembler::LoadTaggedSignedField(Register destination,
}
}

void MacroAssembler::LoadAnyTaggedField(Register destination,
Operand field_operand) {
if (COMPRESS_POINTERS_BOOL) {
DecompressAnyTagged(destination, field_operand);
} else {
mov_tagged(destination, field_operand);
}
}

void MacroAssembler::LoadAnyTaggedField(TaggedRegister destination,
Operand field_operand) {
if (COMPRESS_POINTERS_BOOL) {
movl(destination.reg(), field_operand);
} else {
mov_tagged(destination.reg(), field_operand);
}
}

void MacroAssembler::PushTaggedPointerField(Operand field_operand,
Register scratch) {
void MacroAssembler::PushTaggedField(Operand field_operand, Register scratch) {
if (COMPRESS_POINTERS_BOOL) {
DCHECK(!field_operand.AddressUsesRegister(scratch));
DecompressTaggedPointer(scratch, field_operand);
Push(scratch);
} else {
Push(field_operand);
}
}

void MacroAssembler::PushTaggedAnyField(Operand field_operand,
Register scratch) {
if (COMPRESS_POINTERS_BOOL) {
DCHECK(!field_operand.AddressUsesRegister(scratch));
DecompressAnyTagged(scratch, field_operand);
DecompressTagged(scratch, field_operand);
Push(scratch);
} else {
Push(field_operand);
@ -357,28 +326,20 @@ void MacroAssembler::DecompressTaggedSigned(Register destination,
movl(destination, field_operand);
}

void MacroAssembler::DecompressTaggedPointer(Register destination,
void MacroAssembler::DecompressTagged(Register destination,
Operand field_operand) {
ASM_CODE_COMMENT(this);
movl(destination, field_operand);
addq(destination, kPtrComprCageBaseRegister);
}

void MacroAssembler::DecompressTaggedPointer(Register destination,
Register source) {
void MacroAssembler::DecompressTagged(Register destination, Register source) {
ASM_CODE_COMMENT(this);
movl(destination, source);
addq(destination, kPtrComprCageBaseRegister);
}

void MacroAssembler::DecompressAnyTagged(Register destination,
Operand field_operand) {
ASM_CODE_COMMENT(this);
movl(destination, field_operand);
addq(destination, kPtrComprCageBaseRegister);
}

void MacroAssembler::DecompressTaggedPointer(Register destination,
void MacroAssembler::DecompressTagged(Register destination,
Tagged_t immediate) {
ASM_CODE_COMMENT(this);
leaq(destination,
@ -951,7 +912,7 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(

bind(&maybe_has_optimized_code);
Register optimized_code_entry = flags;
LoadAnyTaggedField(
LoadTaggedField(
optimized_code_entry,
FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(this, optimized_code_entry, closure, r9,
@ -2803,7 +2764,7 @@ void MacroAssembler::InvokeFunction(Register function, Register new_target,
Register actual_parameter_count,
InvokeType type) {
ASM_CODE_COMMENT(this);
LoadTaggedPointerField(
LoadTaggedField(
rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movzxwq(rbx,
FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));
@ -2816,8 +2777,7 @@ void MacroAssembler::InvokeFunction(Register function, Register new_target,
Register actual_parameter_count,
InvokeType type) {
DCHECK_EQ(function, rdi);
LoadTaggedPointerField(rsi,
FieldOperand(function, JSFunction::kContextOffset));
LoadTaggedField(rsi, FieldOperand(function, JSFunction::kContextOffset));
InvokeFunctionCode(rdi, new_target, expected_parameter_count,
actual_parameter_count, type);
}
@ -2857,7 +2817,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
LoadTaggedPointerField(rcx, FieldOperand(function, JSFunction::kCodeOffset));
LoadTaggedField(rcx, FieldOperand(function, JSFunction::kCodeOffset));
switch (type) {
case InvokeType::kCall:
CallCodeObject(rcx);
@ -3227,11 +3187,11 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
ASM_CODE_COMMENT(this);
// Load native context.
LoadMap(dst, rsi);
LoadTaggedPointerField(
LoadTaggedField(
dst,
FieldOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
// Load value from native context.
LoadTaggedPointerField(dst, Operand(dst, Context::SlotOffset(index)));
LoadTaggedField(dst, Operand(dst, Context::SlotOffset(index)));
}

void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
@ -3240,7 +3200,7 @@ void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
Label* on_result,
Label::Distance distance) {
Label fallthrough;
LoadTaggedPointerField(
LoadTaggedField(
scratch_and_result,
FieldOperand(feedback_vector,
FeedbackVector::OffsetOfElementAt(slot.ToInt())));
@ -3413,8 +3373,7 @@ void MacroAssembler::ComputeCodeStartAddress(Register dst) {
// 3. if it is not zero then it jumps to the builtin.
void MacroAssembler::BailoutIfDeoptimized(Register scratch) {
int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
LoadTaggedPointerField(scratch,
Operand(kJavaScriptCallCodeStartRegister, offset));
LoadTaggedField(scratch, Operand(kJavaScriptCallCodeStartRegister, offset));
testl(FieldOperand(scratch, Code::kKindSpecificFlagsOffset),
Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit));
Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),

@ -583,35 +583,21 @@ class V8_EXPORT_PRIVATE MacroAssembler
// ---------------------------------------------------------------------------
// Pointer compression support

// Loads a field containing a HeapObject and decompresses it if pointer
// compression is enabled.
void LoadTaggedPointerField(Register destination, Operand field_operand);
// Loads a field containing any tagged value and decompresses it if necessary.
void LoadTaggedField(Register destination, Operand field_operand);

// Loads a field containing a HeapObject but does not decompress it when
// Loads a field containing any tagged value but does not decompress it when
// pointer compression is enabled.
void LoadTaggedPointerField(TaggedRegister destination,
Operand field_operand);
void LoadTaggedField(TaggedRegister destination, Operand field_operand);

// Loads a field containing a Smi and decompresses it if pointer compression
// is enabled.
void LoadTaggedSignedField(Register destination, Operand field_operand);

// Loads a field containing any tagged value and decompresses it if necessary.
void LoadAnyTaggedField(Register destination, Operand field_operand);

// Loads a field containing any tagged value but does not decompress it when
// pointer compression is enabled.
void LoadAnyTaggedField(TaggedRegister destination, Operand field_operand);

// Loads a field containing a HeapObject, decompresses it if necessary and
// pushes full pointer to the stack. When pointer compression is enabled,
// uses |scratch| to decompress the value.
void PushTaggedPointerField(Operand field_operand, Register scratch);

// Loads a field containing any tagged value, decompresses it if necessary and
// pushes the full pointer to the stack. When pointer compression is enabled,
// uses |scratch| to decompress the value.
void PushTaggedAnyField(Operand field_operand, Register scratch);
void PushTaggedField(Operand field_operand, Register scratch);

// Loads a field containing smi value and untags it.
void SmiUntagField(Register dst, Operand src);
@ -626,10 +612,9 @@ class V8_EXPORT_PRIVATE MacroAssembler

// The following macros work even when pointer compression is not enabled.
void DecompressTaggedSigned(Register destination, Operand field_operand);
void DecompressTaggedPointer(Register destination, Operand field_operand);
void DecompressTaggedPointer(Register destination, Register source);
void DecompressTaggedPointer(Register destination, Tagged_t immediate);
void DecompressAnyTagged(Register destination, Operand field_operand);
void DecompressTagged(Register destination, Operand field_operand);
void DecompressTagged(Register destination, Register source);
void DecompressTagged(Register destination, Tagged_t immediate);

// ---------------------------------------------------------------------------
// V8 Sandbox support

@ -69,8 +69,8 @@ Address V8HeapCompressionScheme::DecompressTaggedSigned(Tagged_t raw_value) {

// static
template <typename TOnHeapAddress>
Address V8HeapCompressionScheme::DecompressTaggedPointer(
TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
Address V8HeapCompressionScheme::DecompressTagged(TOnHeapAddress on_heap_addr,
Tagged_t raw_value) {
#if defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE) && \
!defined(V8_COMPRESS_POINTERS_DONT_USE_GLOBAL_BASE)
V8_ASSUME((base_ & kPtrComprCageBaseMask) == base_);
@ -90,13 +90,6 @@ Address V8HeapCompressionScheme::DecompressTaggedPointer(
return result;
}

// static
template <typename TOnHeapAddress>
Address V8HeapCompressionScheme::DecompressTaggedAny(
TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
return DecompressTaggedPointer(on_heap_addr, raw_value);
}

// static
template <typename ProcessPointerCallback>
void V8HeapCompressionScheme::ProcessIntermediatePointers(
@ -105,10 +98,10 @@ void V8HeapCompressionScheme::ProcessIntermediatePointers(
// If pointer compression is enabled, we may have random compressed pointers
// on the stack that may be used for subsequent operations.
// Extract, decompress and trace both halfwords.
Address decompressed_low = V8HeapCompressionScheme::DecompressTaggedPointer(
Address decompressed_low = V8HeapCompressionScheme::DecompressTagged(
cage_base, static_cast<Tagged_t>(raw_value));
callback(decompressed_low);
Address decompressed_high = V8HeapCompressionScheme::DecompressTaggedPointer(
Address decompressed_high = V8HeapCompressionScheme::DecompressTagged(
cage_base,
static_cast<Tagged_t>(raw_value >> (sizeof(Tagged_t) * CHAR_BIT)));
callback(decompressed_high);
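
The halfword handling above can be read as follows: a 64-bit stack word may hold two independent compressed values, so each 32-bit half is decompressed and reported separately. A hedged sketch assuming a 64-bit Address and 32-bit Tagged_t (not V8 code):

#include <cstdint>

template <typename Callback>
void ProcessIntermediatePointersSketch(uint64_t cage_base, uint64_t raw_value,
                                       Callback callback) {
  // Low half of the stack word.
  callback(cage_base + static_cast<uint32_t>(raw_value));
  // High half, shifted down before decompression.
  callback(cage_base + static_cast<uint32_t>(raw_value >> 32));
}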
@ -165,7 +158,7 @@ Address ExternalCodeCompressionScheme::DecompressTaggedSigned(

// static
template <typename TOnHeapAddress>
Address ExternalCodeCompressionScheme::DecompressTaggedPointer(
Address ExternalCodeCompressionScheme::DecompressTagged(
TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
#if defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE) && \
!defined(V8_COMPRESS_POINTERS_DONT_USE_GLOBAL_BASE)
@ -186,13 +179,6 @@ Address ExternalCodeCompressionScheme::DecompressTaggedPointer(
return result;
}

// static
template <typename TOnHeapAddress>
Address ExternalCodeCompressionScheme::DecompressTaggedAny(
TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
return DecompressTaggedPointer(on_heap_addr, raw_value);
}

#endif // V8_EXTERNAL_CODE_SPACE

//
@ -229,15 +215,8 @@ Address V8HeapCompressionScheme::DecompressTaggedSigned(Tagged_t raw_value) {

// static
template <typename TOnHeapAddress>
Address V8HeapCompressionScheme::DecompressTaggedPointer(
TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
UNREACHABLE();
}

// static
template <typename TOnHeapAddress>
Address V8HeapCompressionScheme::DecompressTaggedAny(
TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
Address V8HeapCompressionScheme::DecompressTagged(TOnHeapAddress on_heap_addr,
Tagged_t raw_value) {
UNREACHABLE();
}

@ -29,14 +29,9 @@ class V8HeapCompressionScheme {
// Decompresses smi value.
V8_INLINE static Address DecompressTaggedSigned(Tagged_t raw_value);

// Decompresses weak or strong heap object pointer or forwarding pointer,
// preserving both weak- and smi- tags.
template <typename TOnHeapAddress>
V8_INLINE static Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr,
Tagged_t raw_value);
// Decompresses any tagged value, preserving both weak- and smi- tags.
template <typename TOnHeapAddress>
V8_INLINE static Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
V8_INLINE static Address DecompressTagged(TOnHeapAddress on_heap_addr,
Tagged_t raw_value);
Tagged_t raw_value);
|
||||
|
||||
// Given a 64bit raw value, found on the stack, calls the callback function
|
||||
@ -82,14 +77,9 @@ class ExternalCodeCompressionScheme {
|
||||
// Decompresses smi value.
|
||||
V8_INLINE static Address DecompressTaggedSigned(Tagged_t raw_value);
|
||||
|
||||
// Decompresses weak or strong heap object pointer or forwarding pointer,
|
||||
// preserving both weak- and smi- tags.
|
||||
template <typename TOnHeapAddress>
|
||||
V8_INLINE static Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr,
|
||||
Tagged_t raw_value);
|
||||
// Decompresses any tagged value, preserving both weak- and smi- tags.
|
||||
template <typename TOnHeapAddress>
|
||||
V8_INLINE static Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
|
||||
V8_INLINE static Address DecompressTagged(TOnHeapAddress on_heap_addr,
|
||||
Tagged_t raw_value);
|
||||
|
||||
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
|
||||
|
@ -284,7 +284,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
|
||||
|
||||
void Generate() final {
|
||||
if (COMPRESS_POINTERS_BOOL) {
|
||||
__ DecompressTaggedPointer(value_, value_);
|
||||
__ DecompressTagged(value_, value_);
|
||||
}
|
||||
__ CheckPageFlag(
|
||||
value_, MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
|
||||
@ -752,14 +752,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
// Check the function's context matches the context argument.
|
||||
UseScratchRegisterScope scope(masm());
|
||||
Register temp = scope.AcquireX();
|
||||
__ LoadTaggedPointerField(
|
||||
temp, FieldMemOperand(func, JSFunction::kContextOffset));
|
||||
__ LoadTaggedField(temp,
|
||||
FieldMemOperand(func, JSFunction::kContextOffset));
|
||||
__ cmp(cp, temp);
|
||||
__ Assert(eq, AbortReason::kWrongFunctionContext);
|
||||
}
|
||||
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
|
||||
__ LoadTaggedPointerField(x2,
|
||||
FieldMemOperand(func, JSFunction::kCodeOffset));
|
||||
__ LoadTaggedField(x2, FieldMemOperand(func, JSFunction::kCodeOffset));
|
||||
__ CallCodeObject(x2);
|
||||
RecordCallPosition(instr);
|
||||
frame_access_state()->ClearSPDelta();
|
||||
@ -1890,22 +1889,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
case kArm64LdrDecompressTaggedSigned:
|
||||
__ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
|
||||
break;
|
||||
case kArm64LdrDecompressTaggedPointer:
|
||||
__ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
|
||||
break;
|
||||
case kArm64LdrDecompressAnyTagged:
|
||||
__ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
|
||||
case kArm64LdrDecompressTagged:
|
||||
__ DecompressTagged(i.OutputRegister(), i.MemoryOperand());
|
||||
break;
|
||||
case kArm64LdarDecompressTaggedSigned:
|
||||
__ AtomicDecompressTaggedSigned(i.OutputRegister(), i.InputRegister(0),
|
||||
i.InputRegister(1), i.TempRegister(0));
|
||||
break;
|
||||
case kArm64LdarDecompressTaggedPointer:
|
||||
__ AtomicDecompressTaggedPointer(i.OutputRegister(), i.InputRegister(0),
|
||||
i.InputRegister(1), i.TempRegister(0));
|
||||
break;
|
||||
case kArm64LdarDecompressAnyTagged:
|
||||
__ AtomicDecompressAnyTagged(i.OutputRegister(), i.InputRegister(0),
|
||||
case kArm64LdarDecompressTagged:
|
||||
__ AtomicDecompressTagged(i.OutputRegister(), i.InputRegister(0),
|
||||
i.InputRegister(1), i.TempRegister(0));
|
||||
break;
|
||||
case kArm64LdrDecodeSandboxedPointer:
|
||||
|
@ -199,11 +199,9 @@ namespace compiler {
|
||||
V(Arm64Float64MoveU64) \
|
||||
V(Arm64U64MoveFloat64) \
|
||||
V(Arm64LdrDecompressTaggedSigned) \
|
||||
V(Arm64LdrDecompressTaggedPointer) \
|
||||
V(Arm64LdrDecompressAnyTagged) \
|
||||
V(Arm64LdrDecompressTagged) \
|
||||
V(Arm64LdarDecompressTaggedSigned) \
|
||||
V(Arm64LdarDecompressTaggedPointer) \
|
||||
V(Arm64LdarDecompressAnyTagged) \
|
||||
V(Arm64LdarDecompressTagged) \
|
||||
V(Arm64StrCompressTagged) \
|
||||
V(Arm64StlrCompressTagged) \
|
||||
V(Arm64LdrDecodeSandboxedPointer) \
|
||||
|
@ -315,11 +315,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
|
||||
case kArm64LdrW:
|
||||
case kArm64Ldr:
|
||||
case kArm64LdrDecompressTaggedSigned:
|
||||
case kArm64LdrDecompressTaggedPointer:
|
||||
case kArm64LdrDecompressAnyTagged:
|
||||
case kArm64LdrDecompressTagged:
|
||||
case kArm64LdarDecompressTaggedSigned:
|
||||
case kArm64LdarDecompressTaggedPointer:
|
||||
case kArm64LdarDecompressAnyTagged:
|
||||
case kArm64LdarDecompressTagged:
|
||||
case kArm64LdrDecodeSandboxedPointer:
|
||||
case kArm64Peek:
|
||||
case kArm64LoadSplat:
|
||||
@ -431,8 +429,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
|
||||
return 1;
|
||||
|
||||
case kArm64LdrDecompressTaggedSigned:
|
||||
case kArm64LdrDecompressTaggedPointer:
|
||||
case kArm64LdrDecompressAnyTagged:
|
||||
case kArm64LdrDecompressTagged:
|
||||
case kArm64Ldr:
|
||||
case kArm64LdrD:
|
||||
case kArm64LdrS:
|
||||
|
@ -843,11 +843,8 @@ void InstructionSelector::VisitLoad(Node* node) {
|
||||
immediate_mode = kLoadStoreImm32;
|
||||
break;
|
||||
case MachineRepresentation::kTaggedPointer:
|
||||
opcode = kArm64LdrDecompressTaggedPointer;
|
||||
immediate_mode = kLoadStoreImm32;
|
||||
break;
|
||||
case MachineRepresentation::kTagged:
|
||||
opcode = kArm64LdrDecompressAnyTagged;
|
||||
opcode = kArm64LdrDecompressTagged;
|
||||
immediate_mode = kLoadStoreImm32;
|
||||
break;
|
||||
#else
|
||||
@ -2773,10 +2770,10 @@ void VisitAtomicLoad(InstructionSelector* selector, Node* node,
|
||||
code = kArm64LdarDecompressTaggedSigned;
|
||||
break;
|
||||
case MachineRepresentation::kTaggedPointer:
|
||||
code = kArm64LdarDecompressTaggedPointer;
|
||||
code = kArm64LdarDecompressTagged;
|
||||
break;
|
||||
case MachineRepresentation::kTagged:
|
||||
code = kArm64LdarDecompressAnyTagged;
|
||||
code = kArm64LdarDecompressTagged;
|
||||
break;
|
||||
#else
|
||||
case MachineRepresentation::kTaggedSigned: // Fall through.
|
||||
|
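(Note the asymmetry these arm64 hunks preserve: the TaggedSigned opcodes survive because a compressed Smi needs no cage base at all, while kTaggedPointer and kTagged collapse onto one opcode. A hedged sketch of the two remaining load flavours, with illustrative types rather than the MacroAssembler API.)

#include <cstdint>

using Address = uintptr_t;
using Tagged_t = uint32_t;

// Smi flavour: a zero-extending 32-bit load is the whole decompression,
// so it keeps a dedicated opcode (kArm64LdrDecompressTaggedSigned).
inline Address DecompressTaggedSigned(Tagged_t raw_value) {
  return static_cast<Address>(raw_value);
}

// Unified flavour: zero-extend, then add the cage base. Formerly the
// TaggedPointer and AnyTagged opcodes; now one kArm64LdrDecompressTagged.
inline Address DecompressTagged(Address cage_base, Tagged_t raw_value) {
  return cage_base + static_cast<Address>(raw_value);
}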
@@ -172,7 +172,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
   void Generate() final {
     ConstantPoolUnavailableScope constant_pool_unavailable(masm());
     if (COMPRESS_POINTERS_BOOL) {
-      __ DecompressTaggedPointer(value_, value_);
+      __ DecompressTagged(value_, value_);
     }
     __ CheckPageFlag(
         value_, scratch0_,

@@ -793,8 +793,8 @@ void CodeGenerator::BailoutIfDeoptimized() {
   }
 
   int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
-  __ LoadTaggedPointerField(
-      r11, MemOperand(kJavaScriptCallCodeStartRegister, offset), r0);
+  __ LoadTaggedField(r11, MemOperand(kJavaScriptCallCodeStartRegister, offset),
+                     r0);
   __ LoadS32(r11, FieldMemOperand(r11, Code::kKindSpecificFlagsOffset), r0);
   __ TestBit(r11, InstructionStream::kMarkedForDeoptimizationBit);
   __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),

@@ -908,14 +908,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       Register func = i.InputRegister(0);
       if (v8_flags.debug_code) {
         // Check the function's context matches the context argument.
-        __ LoadTaggedPointerField(
+        __ LoadTaggedField(
             kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset), r0);
         __ CmpS64(cp, kScratchReg);
         __ Assert(eq, AbortReason::kWrongFunctionContext);
       }
       static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
-      __ LoadTaggedPointerField(
-          r5, FieldMemOperand(func, JSFunction::kCodeOffset), r0);
+      __ LoadTaggedField(r5, FieldMemOperand(func, JSFunction::kCodeOffset),
+                         r0);
       __ CallCodeObject(r5);
       RecordCallPosition(instr);
       DCHECK_EQ(LeaveRC, i.OutputRCBit());

@@ -2880,13 +2880,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false);
       break;
     }
-    case kPPC_LoadDecompressTaggedPointer: {
-      CHECK(instr->HasOutput());
-      ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false);
-      __ add(i.OutputRegister(), i.OutputRegister(), kPtrComprCageBaseRegister);
-      break;
-    }
-    case kPPC_LoadDecompressAnyTagged: {
+    case kPPC_LoadDecompressTagged: {
       CHECK(instr->HasOutput());
       ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false);
       __ add(i.OutputRegister(), i.OutputRegister(), kPtrComprCageBaseRegister);

@@ -411,8 +411,7 @@ namespace compiler {
   V(PPC_S128Store64Lane)             \
   V(PPC_StoreCompressTagged)         \
   V(PPC_LoadDecompressTaggedSigned)  \
-  V(PPC_LoadDecompressTaggedPointer) \
-  V(PPC_LoadDecompressAnyTagged)
+  V(PPC_LoadDecompressTagged)
 
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes

@@ -331,8 +331,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kPPC_LoadSimd128:
     case kPPC_Peek:
     case kPPC_LoadDecompressTaggedSigned:
-    case kPPC_LoadDecompressTaggedPointer:
-    case kPPC_LoadDecompressAnyTagged:
+    case kPPC_LoadDecompressTagged:
     case kPPC_S128Load8Splat:
     case kPPC_S128Load16Splat:
     case kPPC_S128Load32Splat:

@@ -214,10 +214,10 @@ static void VisitLoadCommon(InstructionSelector* selector, Node* node,
      opcode = kPPC_LoadDecompressTaggedSigned;
      break;
    case MachineRepresentation::kTaggedPointer:
-      opcode = kPPC_LoadDecompressTaggedPointer;
+      opcode = kPPC_LoadDecompressTagged;
      break;
    case MachineRepresentation::kTagged:
-      opcode = kPPC_LoadDecompressAnyTagged;
+      opcode = kPPC_LoadDecompressTagged;
      break;
 #else
    case MachineRepresentation::kTaggedSigned:  // Fall through.
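(The record-write stubs in these hunks all decompress value_ in place before CheckPageFlag: the flag word sits in the header of the page that contains the full address, so a 32-bit compressed value cannot locate it. A tiny model of that lookup; the page size is assumed for illustration and is not V8's actual MemoryChunk constant.)

#include <cstdint>

using Address = uintptr_t;

constexpr Address kAssumedPageAlignment = Address{1} << 18;  // 256KB, assumed

// Page flags are reached by masking the *full* pointer down to the start of
// its page, which is why the barrier cannot work on compressed values.
inline Address PageHeaderOf(Address full_ptr) {
  return full_ptr & ~(kAssumedPageAlignment - 1);
}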
@@ -173,7 +173,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
   void Generate() final {
 #if V8_TARGET_ARCH_RISCV64
     if (COMPRESS_POINTERS_BOOL) {
-      __ DecompressTaggedPointer(value_, value_);
+      __ DecompressTagged(value_, value_);
     }
 #endif
     __ CheckPageFlag(

@@ -628,8 +628,8 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
 // 3. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
   int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
-  __ LoadTaggedPointerField(
-      kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
+  __ LoadTaggedField(kScratchReg,
+                     MemOperand(kJavaScriptCallCodeStartRegister, offset));
   __ Lw(kScratchReg,
         FieldMemOperand(kScratchReg, Code::kKindSpecificFlagsOffset));
   __ And(kScratchReg, kScratchReg,

@@ -722,14 +722,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       Register func = i.InputOrZeroRegister(0);
       if (v8_flags.debug_code) {
         // Check the function's context matches the context argument.
-        __ LoadTaggedPointerField(
-            kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+        __ LoadTaggedField(kScratchReg,
+                           FieldMemOperand(func, JSFunction::kContextOffset));
         __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
                   Operand(kScratchReg));
       }
       static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
-      __ LoadTaggedPointerField(a2,
-                                FieldMemOperand(func, JSFunction::kCodeOffset));
+      __ LoadTaggedField(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
       __ CallCodeObject(a2);
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();

@@ -2194,18 +2193,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ DecompressTaggedSigned(result, operand);
       break;
     }
-    case kRiscvLoadDecompressTaggedPointer: {
+    case kRiscvLoadDecompressTagged: {
       CHECK(instr->HasOutput());
       Register result = i.OutputRegister();
       MemOperand operand = i.MemoryOperand();
-      __ DecompressTaggedPointer(result, operand);
-      break;
-    }
-    case kRiscvLoadDecompressAnyTagged: {
-      CHECK(instr->HasOutput());
-      Register result = i.OutputRegister();
-      MemOperand operand = i.MemoryOperand();
-      __ DecompressAnyTagged(result, operand);
+      __ DecompressTagged(result, operand);
       break;
     }
 #endif

@@ -64,8 +64,7 @@ namespace compiler {
   V(RiscvWord64AtomicExchangeUint64)  \
   V(RiscvStoreCompressTagged)         \
   V(RiscvLoadDecompressTaggedSigned)  \
-  V(RiscvLoadDecompressTaggedPointer) \
-  V(RiscvLoadDecompressAnyTagged)     \
+  V(RiscvLoadDecompressTagged)        \
   V(RiscvWord64AtomicCompareExchangeUint64)
 #elif V8_TARGET_ARCH_RISCV32
 #define TARGET_ARCH_OPCODE_LIST_SPECAIL(V) \

@@ -377,8 +377,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kRiscvUlwu:
     case kRiscvWord64AtomicLoadUint64:
     case kRiscvLoadDecompressTaggedSigned:
-    case kRiscvLoadDecompressTaggedPointer:
-    case kRiscvLoadDecompressAnyTagged:
+    case kRiscvLoadDecompressTagged:
 #elif V8_TARGET_ARCH_RISCV32
     case kRiscvWord32AtomicPairLoad:
 #endif

@@ -280,10 +280,10 @@ void InstructionSelector::VisitLoad(Node* node) {
      opcode = kRiscvLoadDecompressTaggedSigned;
      break;
    case MachineRepresentation::kTaggedPointer:
-      opcode = kRiscvLoadDecompressTaggedPointer;
+      opcode = kRiscvLoadDecompressTagged;
      break;
    case MachineRepresentation::kTagged:
-      opcode = kRiscvLoadDecompressAnyTagged;
+      opcode = kRiscvLoadDecompressTagged;
      break;
 #else
    case MachineRepresentation::kTaggedSigned:  // Fall through.

@@ -1938,10 +1938,10 @@ void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
      opcode = kRiscv64LdDecompressTaggedSigned;
      break;
    case MachineRepresentation::kTaggedPointer:
-      opcode = kRiscv64LdDecompressTaggedPointer;
+      opcode = kRiscv64LdDecompressTagged;
      break;
    case MachineRepresentation::kTagged:
-      opcode = kRiscv64LdDecompressAnyTagged;
+      opcode = kRiscv64LdDecompressTagged;
      break;
 #else
    case MachineRepresentation::kTaggedSigned:  // Fall through.
@@ -209,7 +209,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
 
   void Generate() final {
     if (COMPRESS_POINTERS_BOOL) {
-      __ DecompressTaggedPointer(value_, value_);
+      __ DecompressTagged(value_, value_);
     }
     __ CheckPageFlag(
         value_, scratch0_,

@@ -1134,8 +1134,8 @@ void CodeGenerator::BailoutIfDeoptimized() {
   }
 
   int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
-  __ LoadTaggedPointerField(
-      ip, MemOperand(kJavaScriptCallCodeStartRegister, offset), r0);
+  __ LoadTaggedField(ip, MemOperand(kJavaScriptCallCodeStartRegister, offset),
+                     r0);
   __ LoadS32(ip, FieldMemOperand(ip, Code::kKindSpecificFlagsOffset));
   __ TestBit(ip, InstructionStream::kMarkedForDeoptimizationBit);
   __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),

@@ -1240,14 +1240,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       Register func = i.InputRegister(0);
       if (v8_flags.debug_code) {
         // Check the function's context matches the context argument.
-        __ LoadTaggedPointerField(
-            kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+        __ LoadTaggedField(kScratchReg,
+                           FieldMemOperand(func, JSFunction::kContextOffset));
         __ CmpS64(cp, kScratchReg);
         __ Assert(eq, AbortReason::kWrongFunctionContext);
       }
       static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
-      __ LoadTaggedPointerField(r4,
-                                FieldMemOperand(func, JSFunction::kCodeOffset));
+      __ LoadTaggedField(r4, FieldMemOperand(func, JSFunction::kCodeOffset));
       __ CallCodeObject(r4);
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();

@@ -3175,14 +3174,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
       break;
     }
-    case kS390_LoadDecompressTaggedPointer: {
-      CHECK(instr->HasOutput());
-      __ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
-      break;
-    }
-    case kS390_LoadDecompressAnyTagged: {
+    case kS390_LoadDecompressTagged: {
       CHECK(instr->HasOutput());
-      __ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
+      __ DecompressTagged(i.OutputRegister(), i.MemoryOperand());
       break;
     }
     default:

@@ -398,8 +398,7 @@ namespace compiler {
   V(S390_LoadSimd128)                 \
   V(S390_StoreCompressTagged)         \
   V(S390_LoadDecompressTaggedSigned)  \
-  V(S390_LoadDecompressTaggedPointer) \
-  V(S390_LoadDecompressAnyTagged)
+  V(S390_LoadDecompressTagged)
 
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes

@@ -358,8 +358,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kS390_LoadReverseSimd128:
     case kS390_Peek:
     case kS390_LoadDecompressTaggedSigned:
-    case kS390_LoadDecompressTaggedPointer:
-    case kS390_LoadDecompressAnyTagged:
+    case kS390_LoadDecompressTagged:
     case kS390_S128Load8Splat:
     case kS390_S128Load16Splat:
     case kS390_S128Load32Splat:

@@ -303,10 +303,10 @@ ArchOpcode SelectLoadOpcode(LoadRepresentation load_rep) {
      opcode = kS390_LoadDecompressTaggedSigned;
      break;
    case MachineRepresentation::kTaggedPointer:
-      opcode = kS390_LoadDecompressTaggedPointer;
+      opcode = kS390_LoadDecompressTagged;
      break;
    case MachineRepresentation::kTagged:
-      opcode = kS390_LoadDecompressAnyTagged;
+      opcode = kS390_LoadDecompressTagged;
      break;
 #else
    case MachineRepresentation::kTaggedSigned:  // Fall through.
@@ -293,7 +293,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
 
   void Generate() final {
     if (COMPRESS_POINTERS_BOOL) {
-      __ DecompressTaggedPointer(value_, value_);
+      __ DecompressTagged(value_, value_);
     }
     __ CheckPageFlag(
         value_, scratch0_,

@@ -1358,8 +1358,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         __ Assert(equal, AbortReason::kWrongFunctionContext);
       }
       static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
-      __ LoadTaggedPointerField(rcx,
-                                FieldOperand(func, JSFunction::kCodeOffset));
+      __ LoadTaggedField(rcx, FieldOperand(func, JSFunction::kCodeOffset));
       __ CallCodeObject(rcx);
       frame_access_state()->ClearSPDelta();
       RecordCallPosition(instr);

@@ -2576,18 +2575,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
                                      DetermineStubCallMode(), kTaggedSize);
       break;
     }
-    case kX64MovqDecompressTaggedPointer: {
+    case kX64MovqDecompressTagged: {
       CHECK(instr->HasOutput());
       Operand address(i.MemoryOperand());
-      __ DecompressTaggedPointer(i.OutputRegister(), address);
-      EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i,
-                                     DetermineStubCallMode(), kTaggedSize);
-      break;
-    }
-    case kX64MovqDecompressAnyTagged: {
-      CHECK(instr->HasOutput());
-      Operand address(i.MemoryOperand());
-      __ DecompressAnyTagged(i.OutputRegister(), address);
+      __ DecompressTagged(i.OutputRegister(), address);
       EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i,
                                      DetermineStubCallMode(), kTaggedSize);
       break;

@@ -173,8 +173,7 @@ namespace compiler {
   V(X64Float32Abs)                  \
   V(X64Float32Neg)                  \
   V(X64MovqDecompressTaggedSigned)  \
-  V(X64MovqDecompressTaggedPointer) \
-  V(X64MovqDecompressAnyTagged)     \
+  V(X64MovqDecompressTagged)        \
   V(X64MovqCompressTagged)          \
   V(X64MovqEncodeSandboxedPointer)  \
   V(X64MovqDecodeSandboxedPointer)  \

@@ -403,8 +403,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     }
 
     case kX64MovqDecompressTaggedSigned:
-    case kX64MovqDecompressTaggedPointer:
-    case kX64MovqDecompressAnyTagged:
+    case kX64MovqDecompressTagged:
     case kX64MovqCompressTagged:
     case kX64MovqDecodeSandboxedPointer:
     case kX64MovqEncodeSandboxedPointer:

@@ -318,10 +318,8 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
      opcode = kX64MovqDecompressTaggedSigned;
      break;
    case MachineRepresentation::kTaggedPointer:
-      opcode = kX64MovqDecompressTaggedPointer;
-      break;
    case MachineRepresentation::kTagged:
-      opcode = kX64MovqDecompressAnyTagged;
+      opcode = kX64MovqDecompressTagged;
      break;
 #else
    case MachineRepresentation::kTaggedSigned:  // Fall through.
@@ -1428,7 +1428,7 @@ int TranslatedState::CreateNextTranslatedValue(
 
 Address TranslatedState::DecompressIfNeeded(intptr_t value) {
   if (COMPRESS_POINTERS_BOOL) {
-    return V8HeapCompressionScheme::DecompressTaggedAny(
+    return V8HeapCompressionScheme::DecompressTagged(
         isolate(), static_cast<uint32_t>(value));
   } else {
     return value;
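(A worked example of the Smi case that DecompressIfNeeded now shares with heap references; the base and values are made up for illustration.)

#include <cassert>
#include <cstdint>

using Address = uintptr_t;
using Tagged_t = uint32_t;

int main() {
  const Address cage_base = Address{0x2000} << 32;  // illustrative cage base
  const Tagged_t compressed_smi = 0x8;              // Smi 4: tag bit 0 clear

  // Unified decompression adds the base unconditionally, so the Smi's upper
  // 32 bits now hold the cage base rather than a sign extension...
  Address decompressed = cage_base + compressed_smi;

  // ...but the low 32 bits, the only bits a Smi consumer may rely on once
  // Smi-corrupting loads are allowed, still carry the Smi unchanged.
  assert(static_cast<Tagged_t>(decompressed) == compressed_smi);
  return 0;
}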
@@ -1973,8 +1973,7 @@ void WasmStruct::WasmStructPrint(std::ostream& os) {
     case wasm::kRtt: {
       Tagged_t raw = base::ReadUnalignedValue<Tagged_t>(field_address);
 #if V8_COMPRESS_POINTERS
-      Address obj =
-          V8HeapCompressionScheme::DecompressTaggedPointer(address(), raw);
+      Address obj = V8HeapCompressionScheme::DecompressTagged(address(), raw);
 #else
       Address obj = raw;
 #endif

@@ -3000,7 +2999,7 @@ inline i::Object GetObjectFromRaw(void* object) {
   if (RoundDown<i::kPtrComprCageBaseAlignment>(object_ptr) == i::kNullAddress) {
     // Try to decompress pointer.
     i::Isolate* isolate = i::Isolate::Current();
-    object_ptr = i::V8HeapCompressionScheme::DecompressTaggedAny(
+    object_ptr = i::V8HeapCompressionScheme::DecompressTagged(
         isolate, static_cast<i::Tagged_t>(object_ptr));
   }
 #endif

@@ -1109,7 +1109,7 @@ void VisitSpillSlot(Isolate* isolate, RootVisitor* v,
     if (!HAS_SMI_TAG(value) && value <= 0xffffffff) {
       // We don't need to update smi values or full pointers.
       was_compressed = true;
-      *spill_slot.location() = V8HeapCompressionScheme::DecompressTaggedPointer(
+      *spill_slot.location() = V8HeapCompressionScheme::DecompressTagged(
           cage_base, static_cast<Tagged_t>(value));
       if (DEBUG_BOOL) {
         // Ensure that the spill slot contains correct heap object.

@@ -1144,7 +1144,7 @@ void VisitSpillSlot(Isolate* isolate, RootVisitor* v,
     if (!HAS_SMI_TAG(compressed_value)) {
       was_compressed = slot_contents <= 0xFFFFFFFF;
       // We don't need to update smi values.
-      *spill_slot.location() = V8HeapCompressionScheme::DecompressTaggedPointer(
+      *spill_slot.location() = V8HeapCompressionScheme::DecompressTagged(
           cage_base, compressed_value);
     }
   }

@@ -133,7 +133,7 @@ Object Isolate::VerifyBuiltinsResult(Object result) {
   // because that's the assumption in generated code (which might call this
   // builtin).
   if (!result.IsSmi()) {
-    DCHECK_EQ(result.ptr(), V8HeapCompressionScheme::DecompressTaggedPointer(
+    DCHECK_EQ(result.ptr(), V8HeapCompressionScheme::DecompressTagged(
                                 this, static_cast<Tagged_t>(result.ptr())));
   }
 #endif

@@ -149,11 +149,11 @@ ObjectPair Isolate::VerifyBuiltinsResult(ObjectPair pair) {
   // because that's the assumption in generated code (which might call this
   // builtin).
   if (!HAS_SMI_TAG(pair.x)) {
-    DCHECK_EQ(pair.x, V8HeapCompressionScheme::DecompressTaggedPointer(
+    DCHECK_EQ(pair.x, V8HeapCompressionScheme::DecompressTagged(
                           this, static_cast<Tagged_t>(pair.x)));
   }
   if (!HAS_SMI_TAG(pair.y)) {
-    DCHECK_EQ(pair.y, V8HeapCompressionScheme::DecompressTaggedPointer(
+    DCHECK_EQ(pair.y, V8HeapCompressionScheme::DecompressTagged(
                           this, static_cast<Tagged_t>(pair.y)));
   }
 #endif  // V8_COMPRESS_POINTERS

@@ -4140,7 +4140,7 @@ void Isolate::VerifyStaticRoots() {
   for (Tagged_t cmp_ptr : StaticReadOnlyRootsPointerTable) {
     Address the_root = roots[idx];
     Address ptr =
-        V8HeapCompressionScheme::DecompressTaggedPointer(cage_base(), cmp_ptr);
+        V8HeapCompressionScheme::DecompressTagged(cage_base(), cmp_ptr);
     CHECK_WITH_MSG(the_root == ptr, STATIC_ROOTS_FAILED_MSG);
     // All roots must fit on first page, since only this page is guaranteed to
     // have a stable offset from the cage base. If this ever changes we need

@@ -4378,9 +4378,9 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
     Address base = code_cage->base();
     Address last = base + code_cage->size() - 1;
     PtrComprCageBase code_cage_base{code_cage_base_};
-    CHECK_EQ(base, ComprScheme::DecompressTaggedPointer(
+    CHECK_EQ(base, ComprScheme::DecompressTagged(
                        code_cage_base, ComprScheme::CompressTagged(base)));
-    CHECK_EQ(last, ComprScheme::DecompressTaggedPointer(
+    CHECK_EQ(last, ComprScheme::DecompressTagged(
                        code_cage_base, ComprScheme::CompressTagged(last)));
   }
 #endif  // V8_EXTERNAL_CODE_SPACE

@@ -38,7 +38,7 @@ SlotCallbackResult UpdateTypedSlotHelper::UpdateTypedSlot(Heap* heap,
     }
     case SlotType::kConstPoolEmbeddedObjectCompressed: {
       HeapObject old_target =
-          HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny(
+          HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTagged(
               heap->isolate(), base::Memory<Tagged_t>(addr))));
       HeapObject new_target = old_target;
       SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));

@@ -80,7 +80,7 @@ HeapObject UpdateTypedSlotHelper::GetTargetObject(Heap* heap,
       return rinfo.target_object(heap->isolate());
     }
     case SlotType::kConstPoolEmbeddedObjectCompressed: {
-      Address full = V8HeapCompressionScheme::DecompressTaggedAny(
+      Address full = V8HeapCompressionScheme::DecompressTagged(
           heap->isolate(), base::Memory<Tagged_t>(addr));
       return HeapObject::cast(Object(full));
    }
@@ -483,10 +483,10 @@ inline void MaglevAssembler::DeoptIfBufferDetached(Register array,
           ->DependOnArrayBufferDetachingProtector()) {
     // A detached buffer leads to megamorphic feedback, so we won't have a deopt
     // loop if we deopt here.
-    LoadTaggedPointerField(
-        scratch, FieldMemOperand(array, JSArrayBufferView::kBufferOffset));
-    LoadTaggedPointerField(
-        scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
+    LoadTaggedField(scratch,
+                    FieldMemOperand(array, JSArrayBufferView::kBufferOffset));
+    LoadTaggedField(scratch,
+                    FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
     Tst(scratch.W(), Immediate(JSArrayBuffer::WasDetachedBit::kMask));
     EmitEagerDeoptIf(ne, DeoptimizeReason::kArrayBufferWasDetached, node);
   }

@@ -488,7 +488,7 @@ void MaglevAssembler::LoadSingleCharacterString(Register result,
   Register table = scratch;
   LoadRoot(table, RootIndex::kSingleCharacterStringTable);
   Add(table, table, Operand(char_code, LSL, kTaggedSizeLog2));
-  DecompressAnyTagged(result, FieldMemOperand(table, FixedArray::kHeaderSize));
+  DecompressTagged(result, FieldMemOperand(table, FixedArray::kHeaderSize));
 }
 
 void MaglevAssembler::StringFromCharCode(RegisterSnapshot register_snapshot,

@@ -604,7 +604,7 @@ void MaglevAssembler::StringCharCodeAt(RegisterSnapshot& register_snapshot,
 
   // Is a thin string.
   {
-    DecompressAnyTagged(string,
+    DecompressTagged(string,
                      FieldMemOperand(string, ThinString::kActualOffset));
     B(&loop);
   }

@@ -616,7 +616,7 @@ void MaglevAssembler::StringCharCodeAt(RegisterSnapshot& register_snapshot,
 
   Ldr(offset.W(), FieldMemOperand(string, SlicedString::kOffsetOffset));
   SmiUntag(offset);
-  DecompressAnyTagged(string,
+  DecompressTagged(string,
                    FieldMemOperand(string, SlicedString::kParentOffset));
   Add(index, index, offset);
   B(&loop);

@@ -630,8 +630,7 @@ void MaglevAssembler::StringCharCodeAt(RegisterSnapshot& register_snapshot,
   Ldr(second_string.W(), FieldMemOperand(string, ConsString::kSecondOffset));
   CompareRoot(second_string, RootIndex::kempty_string);
   B(deferred_runtime_call, ne);
-  DecompressAnyTagged(string,
-                      FieldMemOperand(string, ConsString::kFirstOffset));
+  DecompressTagged(string, FieldMemOperand(string, ConsString::kFirstOffset));
   B(&loop);  // Try again with first string.
 }

@@ -92,7 +92,7 @@ void CheckJSObjectElementsBounds::GenerateCode(MaglevAssembler* masm,
     __ CompareObjectType(object, FIRST_JS_OBJECT_TYPE, scratch);
     __ Assert(ge, AbortReason::kUnexpectedValue);
   }
-  __ LoadAnyTaggedField(scratch,
+  __ LoadTaggedField(scratch,
                      FieldMemOperand(object, JSObject::kElementsOffset));
   if (v8_flags.debug_code) {
     __ AssertNotSmi(scratch);

@@ -1325,8 +1325,8 @@ void CheckedInternalizedString::GenerateCode(MaglevAssembler* masm,
   // Deopt if this isn't a thin string.
   __ Tst(instance_type.W(), Immediate(kThinStringTagBit));
   __ EmitEagerDeoptIf(eq, DeoptimizeReason::kWrongMap, node);
-  __ LoadTaggedPointerField(
-      object, FieldMemOperand(object, ThinString::kActualOffset));
+  __ LoadTaggedField(object,
+                     FieldMemOperand(object, ThinString::kActualOffset));
   if (v8_flags.debug_code) {
     __ RecordComment("DCHECK IsInternalizedString");
     Register scratch = instance_type;

@@ -1408,7 +1408,7 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
   Register generator = ToRegister(generator_input());
   Register array = WriteBarrierDescriptor::ObjectRegister();
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       array, FieldMemOperand(generator,
                              JSGeneratorObject::kParametersAndRegistersOffset));

@@ -1524,7 +1524,7 @@ void IncreaseInterruptBudget::GenerateCode(MaglevAssembler* masm,
   Register budget = temps.Acquire().W();
   __ Ldr(feedback_cell,
          MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       feedback_cell,
       FieldMemOperand(feedback_cell, JSFunction::kFeedbackCellOffset));
   __ Ldr(budget,

@@ -1647,7 +1647,7 @@ void HandleInterruptsAndTiering(MaglevAssembler* masm, ZoneLabelRef done,
   Register budget = temps.Acquire().W();
   __ Ldr(feedback_cell,
          MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       feedback_cell,
       FieldMemOperand(feedback_cell, JSFunction::kFeedbackCellOffset));
   __ Move(budget, v8_flags.interrupt_budget);

@@ -1672,7 +1672,7 @@ void ReduceInterruptBudget::GenerateCode(MaglevAssembler* masm,
   Register budget = temps.Acquire().W();
   __ Ldr(feedback_cell,
          MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       feedback_cell,
      FieldMemOperand(feedback_cell, JSFunction::kFeedbackCellOffset));
   __ Ldr(budget,

@@ -1793,7 +1793,7 @@ void LoadFixedArrayElement::GenerateCode(MaglevAssembler* masm,
   }
   Register result_reg = ToRegister(result());
   __ Add(result_reg, elements, Operand(index, LSL, kTaggedSizeLog2));
-  __ DecompressAnyTagged(result_reg,
+  __ DecompressTagged(result_reg,
                       FieldMemOperand(result_reg, FixedArray::kHeaderSize));
 }

@@ -1829,7 +1829,7 @@ void StoreDoubleField::GenerateCode(MaglevAssembler* masm,
   Register tmp = temps.Acquire();
 
   __ AssertNotSmi(object);
-  __ DecompressAnyTagged(tmp, FieldMemOperand(object, offset()));
+  __ DecompressTagged(tmp, FieldMemOperand(object, offset()));
   __ AssertNotSmi(tmp);
   __ Move(FieldMemOperand(tmp, HeapNumber::kValueOffset), value);
 }

@@ -32,9 +32,8 @@ void MaglevAssembler::LoadSingleCharacterString(Register result,
   DCHECK_LT(char_code, String::kMaxOneByteCharCode);
   Register table = result;
   LoadRoot(table, RootIndex::kSingleCharacterStringTable);
-  DecompressAnyTagged(
-      result, FieldMemOperand(
-                  table, FixedArray::kHeaderSize + char_code * kTaggedSize));
+  DecompressTagged(result, FieldMemOperand(table, FixedArray::kHeaderSize +
+                                                      char_code * kTaggedSize));
 }
 
 void MaglevAssembler::LoadDataField(const PolymorphicAccessInfo& access_info,

@@ -54,13 +53,12 @@ void MaglevAssembler::LoadDataField(const PolymorphicAccessInfo& access_info,
     }
     // The field is in the property array, first load it from there.
     AssertNotSmi(load_source_object);
-    DecompressAnyTagged(load_source,
-                        FieldMemOperand(load_source_object,
-                                        JSReceiver::kPropertiesOrHashOffset));
+    DecompressTagged(load_source,
+                     FieldMemOperand(load_source_object,
+                                     JSReceiver::kPropertiesOrHashOffset));
   }
   AssertNotSmi(load_source);
-  DecompressAnyTagged(result,
-                      FieldMemOperand(load_source, field_index.offset()));
+  DecompressTagged(result, FieldMemOperand(load_source, field_index.offset()));
 }
 
 }  // namespace maglev
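(All of the Maglev hunks above funnel through the same primitive: read a 32-bit field at object - tag + offset, then add the cage base. A plain-C++ model of that load; kHeapObjectTag really is 1 in V8, the rest of the scaffolding is illustrative.)

#include <cstdint>
#include <cstring>

using Address = uintptr_t;
using Tagged_t = uint32_t;

constexpr int kHeapObjectTag = 1;  // tagged HeapObject pointers are off by one

inline Address LoadTaggedFieldModel(Address cage_base, Address object,
                                    int offset) {
  Tagged_t raw;
  // FieldMemOperand(object, offset) semantics: untag, then apply the offset.
  std::memcpy(&raw,
              reinterpret_cast<const void*>(object - kHeapObjectTag + offset),
              sizeof(raw));
  return cage_base + static_cast<Address>(raw);  // DecompressTagged
}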
@@ -953,7 +953,7 @@ void LoadDoubleField::GenerateCode(MaglevAssembler* masm,
   Register tmp = temps.Acquire();
   Register object = ToRegister(object_input());
   __ AssertNotSmi(object);
-  __ DecompressAnyTagged(tmp, FieldMemOperand(object, offset()));
+  __ DecompressTagged(tmp, FieldMemOperand(object, offset()));
   __ AssertNotSmi(tmp);
   __ LoadHeapNumberValue(ToDoubleRegister(result()), tmp);
 }

@@ -966,8 +966,7 @@ void LoadTaggedField::GenerateCode(MaglevAssembler* masm,
                                    const ProcessingState& state) {
   Register object = ToRegister(object_input());
   __ AssertNotSmi(object);
-  __ DecompressAnyTagged(ToRegister(result()),
-                         FieldMemOperand(object, offset()));
+  __ DecompressTagged(ToRegister(result()), FieldMemOperand(object, offset()));
 }
 
 namespace {

@@ -1065,7 +1064,7 @@ void LoadPolymorphicTaggedField::GenerateCode(MaglevAssembler* masm,
       Register cell = map;  // Reuse scratch.
       __ Move(cell, access_info.cell());
       __ AssertNotSmi(cell);
-      __ DecompressAnyTagged(result,
+      __ DecompressTagged(result,
                           FieldMemOperand(cell, Cell::kValueOffset));
       break;
     }

@@ -1878,7 +1877,7 @@ void GeneratorRestoreRegister::GenerateCode(MaglevAssembler* masm,
   Register value = (array == result_reg ? temp : result_reg);
 
   // Loads the current value in the generator register file.
-  __ DecompressAnyTagged(
+  __ DecompressTagged(
       value, FieldMemOperand(array, FixedArray::OffsetOfElementAt(index())));
 
   // And trashs it with StaleRegisterConstant.

@@ -2429,7 +2428,7 @@ void CallKnownJSFunction::GenerateCode(MaglevAssembler* masm,
     __ CallBuiltin(shared_function_info().builtin_id());
   } else {
     __ AssertCallableFunction(kJavaScriptCallTargetRegister);
-    __ LoadTaggedPointerField(kJavaScriptCallCodeStartRegister,
+    __ LoadTaggedField(kJavaScriptCallCodeStartRegister,
                        FieldMemOperand(kJavaScriptCallTargetRegister,
                                        JSFunction::kCodeOffset));
     __ CallCodeObject(kJavaScriptCallCodeStartRegister);

@@ -372,10 +372,10 @@ inline void MaglevAssembler::DeoptIfBufferDetached(Register array,
           ->DependOnArrayBufferDetachingProtector()) {
     // A detached buffer leads to megamorphic feedback, so we won't have a deopt
     // loop if we deopt here.
-    LoadTaggedPointerField(
-        scratch, FieldOperand(array, JSArrayBufferView::kBufferOffset));
-    LoadTaggedPointerField(
-        scratch, FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset));
+    LoadTaggedField(scratch,
+                    FieldOperand(array, JSArrayBufferView::kBufferOffset));
+    LoadTaggedField(scratch,
+                    FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset));
     testl(scratch, Immediate(JSArrayBuffer::WasDetachedBit::kMask));
     EmitEagerDeoptIf(not_zero, DeoptimizeReason::kArrayBufferWasDetached, node);
   }

@@ -111,7 +111,7 @@ void MaglevAssembler::LoadSingleCharacterString(Register result,
   DCHECK_NE(char_code, scratch);
   Register table = scratch;
   LoadRoot(table, RootIndex::kSingleCharacterStringTable);
-  DecompressAnyTagged(result, FieldOperand(table, char_code, times_tagged_size,
+  DecompressTagged(result, FieldOperand(table, char_code, times_tagged_size,
                                         FixedArray::kHeaderSize));
 }

@@ -224,8 +224,7 @@ void MaglevAssembler::StringCharCodeAt(RegisterSnapshot& register_snapshot,
 
   // Is a thin string.
   {
-    DecompressAnyTagged(string,
-                        FieldOperand(string, ThinString::kActualOffset));
+    DecompressTagged(string, FieldOperand(string, ThinString::kActualOffset));
     jmp(&loop, Label::kNear);
   }

@@ -234,8 +233,7 @@ void MaglevAssembler::StringCharCodeAt(RegisterSnapshot& register_snapshot,
     Register offset = scratch;
     movl(offset, FieldOperand(string, SlicedString::kOffsetOffset));
     SmiUntag(offset);
-    DecompressAnyTagged(string,
-                        FieldOperand(string, SlicedString::kParentOffset));
+    DecompressTagged(string, FieldOperand(string, SlicedString::kParentOffset));
     addl(index, offset);
     jmp(&loop, Label::kNear);
   }

@@ -245,7 +243,7 @@ void MaglevAssembler::StringCharCodeAt(RegisterSnapshot& register_snapshot,
     CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
                 RootIndex::kempty_string);
     j(not_equal, deferred_runtime_call);
-    DecompressAnyTagged(string, FieldOperand(string, ConsString::kFirstOffset));
+    DecompressTagged(string, FieldOperand(string, ConsString::kFirstOffset));
     jmp(&loop, Label::kNear);  // Try again with first string.
   }

@@ -57,7 +57,7 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
   Register generator = ToRegister(generator_input());
   Register array = WriteBarrierDescriptor::ObjectRegister();
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       array, FieldOperand(generator,
                           JSGeneratorObject::kParametersAndRegistersOffset));

@@ -498,7 +498,7 @@ void CheckJSObjectElementsBounds::GenerateCode(MaglevAssembler* masm,
     __ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, kScratchRegister);
     __ Assert(greater_equal, AbortReason::kUnexpectedValue);
   }
-  __ LoadAnyTaggedField(kScratchRegister,
+  __ LoadTaggedField(kScratchRegister,
                      FieldOperand(object, JSObject::kElementsOffset));
   if (v8_flags.debug_code) {
     __ AssertNotSmi(kScratchRegister);

@@ -548,8 +548,8 @@ void CheckedInternalizedString::GenerateCode(MaglevAssembler* masm,
   // Deopt if this isn't a thin string.
   __ testb(map_tmp, Immediate(kThinStringTagBit));
   __ EmitEagerDeoptIf(zero, DeoptimizeReason::kWrongMap, node);
-  __ LoadTaggedPointerField(
-      object, FieldOperand(object, ThinString::kActualOffset));
+  __ LoadTaggedField(object,
+                     FieldOperand(object, ThinString::kActualOffset));
   if (v8_flags.debug_code) {
     __ RecordComment("DCHECK IsInternalizedString");
     __ LoadMap(map_tmp, object);

@@ -721,7 +721,7 @@ void LoadFixedArrayElement::GenerateCode(MaglevAssembler* masm,
     __ cmpq(index, Immediate(0));
     __ Assert(above_equal, AbortReason::kUnexpectedNegativeValue);
   }
-  __ DecompressAnyTagged(result_reg,
+  __ DecompressTagged(result_reg,
                       FieldOperand(elements, index, times_tagged_size,
                                    FixedArray::kHeaderSize));
 }

@@ -1092,7 +1092,7 @@ void StoreDoubleField::GenerateCode(MaglevAssembler* masm,
   DoubleRegister value = ToDoubleRegister(value_input());
 
   __ AssertNotSmi(object);
-  __ DecompressAnyTagged(tmp, FieldOperand(object, offset()));
+  __ DecompressTagged(tmp, FieldOperand(object, offset()));
   __ AssertNotSmi(tmp);
   __ Movsd(FieldOperand(tmp, HeapNumber::kValueOffset), value);
 }

@@ -2162,8 +2162,8 @@ void IncreaseInterruptBudget::GenerateCode(MaglevAssembler* masm,
   MaglevAssembler::ScratchRegisterScope temps(masm);
   Register scratch = temps.Acquire();
   __ movq(scratch, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
-  __ LoadTaggedPointerField(
-      scratch, FieldOperand(scratch, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(scratch,
+                     FieldOperand(scratch, JSFunction::kFeedbackCellOffset));
   __ addl(FieldOperand(scratch, FeedbackCell::kInterruptBudgetOffset),
           Immediate(amount()));
 }

@@ -2253,8 +2253,8 @@ void HandleInterruptsAndTiering(MaglevAssembler* masm, ZoneLabelRef done,
     __ incl(FieldOperand(scratch0, FeedbackVector::kProfilerTicksOffset));
     // JSFunction::SetInterruptBudget.
     __ movq(scratch0, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
-    __ LoadTaggedPointerField(
-        scratch0, FieldOperand(scratch0, JSFunction::kFeedbackCellOffset));
+    __ LoadTaggedField(scratch0,
+                       FieldOperand(scratch0, JSFunction::kFeedbackCellOffset));
     __ movl(FieldOperand(scratch0, FeedbackCell::kInterruptBudgetOffset),
             Immediate(v8_flags.interrupt_budget));
     __ jmp(*done);

@@ -2272,8 +2272,8 @@ void ReduceInterruptBudget::GenerateCode(MaglevAssembler* masm,
   MaglevAssembler::ScratchRegisterScope temps(masm);
   Register scratch = temps.Acquire();
   __ movq(scratch, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
-  __ LoadTaggedPointerField(
-      scratch, FieldOperand(scratch, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(scratch,
+                     FieldOperand(scratch, JSFunction::kFeedbackCellOffset));
   __ subl(FieldOperand(scratch, FeedbackCell::kInterruptBudgetOffset),
           Immediate(amount()));
   ZoneLabelRef done(masm);
@@ -35,12 +35,12 @@ bool CompressedObjectSlot::contains_map_value(Address raw_value) const {
 
 Object CompressedObjectSlot::operator*() const {
   Tagged_t value = *location();
-  return Object(TCompressionScheme::DecompressTaggedAny(address(), value));
+  return Object(TCompressionScheme::DecompressTagged(address(), value));
 }
 
 Object CompressedObjectSlot::load(PtrComprCageBase cage_base) const {
   Tagged_t value = *location();
-  return Object(TCompressionScheme::DecompressTaggedAny(cage_base, value));
+  return Object(TCompressionScheme::DecompressTagged(cage_base, value));
 }
 
 void CompressedObjectSlot::store(Object value) const {

@@ -63,17 +63,17 @@ Map CompressedObjectSlot::load_map() const {
 
 Object CompressedObjectSlot::Acquire_Load() const {
   AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location());
-  return Object(TCompressionScheme::DecompressTaggedAny(address(), value));
+  return Object(TCompressionScheme::DecompressTagged(address(), value));
 }
 
 Object CompressedObjectSlot::Relaxed_Load() const {
   AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
-  return Object(TCompressionScheme::DecompressTaggedAny(address(), value));
+  return Object(TCompressionScheme::DecompressTagged(address(), value));
 }
 
 Object CompressedObjectSlot::Relaxed_Load(PtrComprCageBase cage_base) const {
   AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
-  return Object(TCompressionScheme::DecompressTaggedAny(cage_base, value));
+  return Object(TCompressionScheme::DecompressTagged(cage_base, value));
 }
 
 void CompressedObjectSlot::Relaxed_Store(Object value) const {

@@ -92,7 +92,7 @@ Object CompressedObjectSlot::Release_CompareAndSwap(Object old,
   Tagged_t target_ptr = TCompressionScheme::CompressTagged(target.ptr());
   Tagged_t result =
       AsAtomicTagged::Release_CompareAndSwap(location(), old_ptr, target_ptr);
-  return Object(TCompressionScheme::DecompressTaggedAny(address(), result));
+  return Object(TCompressionScheme::DecompressTagged(address(), result));
 }
 
 //

@@ -101,12 +101,12 @@ Object CompressedObjectSlot::Release_CompareAndSwap(Object old,
 
 MaybeObject CompressedMaybeObjectSlot::operator*() const {
   Tagged_t value = *location();
-  return MaybeObject(TCompressionScheme::DecompressTaggedAny(address(), value));
+  return MaybeObject(TCompressionScheme::DecompressTagged(address(), value));
 }
 
 MaybeObject CompressedMaybeObjectSlot::load(PtrComprCageBase cage_base) const {
   Tagged_t value = *location();
-  return MaybeObject(TCompressionScheme::DecompressTaggedAny(cage_base, value));
+  return MaybeObject(TCompressionScheme::DecompressTagged(cage_base, value));
 }
 
 void CompressedMaybeObjectSlot::store(MaybeObject value) const {

@@ -115,13 +115,13 @@ void CompressedMaybeObjectSlot::store(MaybeObject value) const {
 
 MaybeObject CompressedMaybeObjectSlot::Relaxed_Load() const {
   AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
-  return MaybeObject(TCompressionScheme::DecompressTaggedAny(address(), value));
+  return MaybeObject(TCompressionScheme::DecompressTagged(address(), value));
 }
 
 MaybeObject CompressedMaybeObjectSlot::Relaxed_Load(
     PtrComprCageBase cage_base) const {
   AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
-  return MaybeObject(TCompressionScheme::DecompressTaggedAny(cage_base, value));
+  return MaybeObject(TCompressionScheme::DecompressTagged(cage_base, value));
 }
 
 void CompressedMaybeObjectSlot::Relaxed_Store(MaybeObject value) const {

@@ -143,14 +143,14 @@ void CompressedMaybeObjectSlot::Release_CompareAndSwap(
 
 HeapObjectReference CompressedHeapObjectSlot::operator*() const {
   Tagged_t value = *location();
   return HeapObjectReference(
-      TCompressionScheme::DecompressTaggedPointer(address(), value));
+      TCompressionScheme::DecompressTagged(address(), value));
 }
 
 HeapObjectReference CompressedHeapObjectSlot::load(
     PtrComprCageBase cage_base) const {
   Tagged_t value = *location();
   return HeapObjectReference(
-      TCompressionScheme::DecompressTaggedPointer(cage_base, value));
+      TCompressionScheme::DecompressTagged(cage_base, value));
 }
 
 void CompressedHeapObjectSlot::store(HeapObjectReference value) const {

@@ -161,7 +161,7 @@ HeapObject CompressedHeapObjectSlot::ToHeapObject() const {
   Tagged_t value = *location();
   DCHECK(HAS_STRONG_HEAP_OBJECT_TAG(value));
   return HeapObject::cast(
-      Object(TCompressionScheme::DecompressTaggedPointer(address(), value)));
+      Object(TCompressionScheme::DecompressTagged(address(), value)));
 }
 
 void CompressedHeapObjectSlot::StoreHeapObject(HeapObject value) const {

@@ -176,7 +176,7 @@ template <typename CompressionScheme>
 Object OffHeapCompressedObjectSlot<CompressionScheme>::load(
     PtrComprCageBase cage_base) const {
   Tagged_t value = *TSlotBase::location();
-  return Object(CompressionScheme::DecompressTaggedAny(cage_base, value));
+  return Object(CompressionScheme::DecompressTagged(cage_base, value));
 }
 
 template <typename CompressionScheme>

@@ -188,14 +188,14 @@ template <typename CompressionScheme>
 Object OffHeapCompressedObjectSlot<CompressionScheme>::Relaxed_Load(
     PtrComprCageBase cage_base) const {
   AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(TSlotBase::location());
-  return Object(CompressionScheme::DecompressTaggedAny(cage_base, value));
+  return Object(CompressionScheme::DecompressTagged(cage_base, value));
 }
 
 template <typename CompressionScheme>
 Object OffHeapCompressedObjectSlot<CompressionScheme>::Acquire_Load(
     PtrComprCageBase cage_base) const {
   AtomicTagged_t value = AsAtomicTagged::Acquire_Load(TSlotBase::location());
-  return Object(CompressionScheme::DecompressTaggedAny(cage_base, value));
+  return Object(CompressionScheme::DecompressTagged(cage_base, value));
 }
 
 template <typename CompressionScheme>

@@ -479,8 +479,8 @@ void SortIndices(Isolate* isolate, Handle<FixedArray> indices,
   AtomicSlot end(start + sort_size);
   std::sort(start, end, [isolate](Tagged_t elementA, Tagged_t elementB) {
 #ifdef V8_COMPRESS_POINTERS
-    Object a(V8HeapCompressionScheme::DecompressTaggedAny(isolate, elementA));
-    Object b(V8HeapCompressionScheme::DecompressTaggedAny(isolate, elementB));
+    Object a(V8HeapCompressionScheme::DecompressTagged(isolate, elementA));
+    Object b(V8HeapCompressionScheme::DecompressTagged(isolate, elementB));
 #else
     Object a(elementA);
     Object b(elementB);

@@ -84,7 +84,7 @@ HeapObjectReference HeapObjectReference::ClearedValue(
 #ifdef V8_COMPRESS_POINTERS
   // This is necessary to make pointer decompression computation also
   // suitable for cleared weak references.
-  Address raw_value = V8HeapCompressionScheme::DecompressTaggedPointer(
+  Address raw_value = V8HeapCompressionScheme::DecompressTagged(
       cage_base, kClearedWeakHeapObjectLower32);
 #else
   Address raw_value = kClearedWeakHeapObjectLower32;

@@ -35,10 +35,9 @@ Address TaggedField<T, kFieldOffset, CompressionScheme>::tagged_to_full(
   if (kIsSmi) {
     return CompressionScheme::DecompressTaggedSigned(tagged_value);
   } else if (kIsHeapObject) {
-    return CompressionScheme::DecompressTaggedPointer(on_heap_addr,
-                                                      tagged_value);
+    return CompressionScheme::DecompressTagged(on_heap_addr, tagged_value);
   } else {
-    return CompressionScheme::DecompressTaggedAny(on_heap_addr, tagged_value);
+    return CompressionScheme::DecompressTagged(on_heap_addr, tagged_value);
   }
 #else
   return tagged_value;
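(After the tagged_to_full hunk above, only the Smi/non-Smi split remains; the known-pointer and any-tagged branches are literally the same call. Restated as a free function over illustrative helpers, a sketch rather than V8's template.)

#include <cstdint>

using Address = uintptr_t;
using Tagged_t = uint32_t;

inline Address DecompressTaggedSigned(Tagged_t v) {
  return static_cast<Address>(v);  // zero-extend; no cage base involved
}

inline Address DecompressTagged(Address base, Tagged_t v) {
  return base + static_cast<Address>(v);
}

// kIsSmi is a compile-time property of the field type in the real template.
inline Address tagged_to_full(Address on_heap_addr, Tagged_t v, bool is_smi) {
  return is_smi ? DecompressTaggedSigned(v)
                : DecompressTagged(on_heap_addr, v);  // pointer == any, now
}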
@@ -112,8 +112,7 @@ bool TaggedImpl<kRefType, StorageType>::GetHeapObjectIfStrong(
   if (kIsFull) return GetHeapObjectIfStrong(result);
   // Implementation for compressed pointers.
   if (IsStrong()) {
-    *result =
-        HeapObject::cast(Object(CompressionScheme::DecompressTaggedPointer(
+    *result = HeapObject::cast(Object(CompressionScheme::DecompressTagged(
         isolate, static_cast<Tagged_t>(ptr_))));
     return true;
   }

@@ -138,7 +137,7 @@ HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObjectAssumeStrong(
   if (kIsFull) return GetHeapObjectAssumeStrong();
   // Implementation for compressed pointers.
   DCHECK(IsStrong());
-  return HeapObject::cast(Object(CompressionScheme::DecompressTaggedPointer(
+  return HeapObject::cast(Object(CompressionScheme::DecompressTagged(
       isolate, static_cast<Tagged_t>(ptr_))));
 }

@@ -224,11 +223,11 @@ HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObject(
   DCHECK(!IsSmi());
   if (kCanBeWeak) {
     DCHECK(!IsCleared());
-    return HeapObject::cast(Object(CompressionScheme::DecompressTaggedPointer(
+    return HeapObject::cast(Object(CompressionScheme::DecompressTagged(
         isolate, static_cast<Tagged_t>(ptr_) & ~kWeakHeapObjectMask)));
   } else {
     DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(ptr_));
-    return HeapObject::cast(Object(CompressionScheme::DecompressTaggedPointer(
+    return HeapObject::cast(Object(CompressionScheme::DecompressTagged(
         isolate, static_cast<Tagged_t>(ptr_))));
   }
 }

@@ -30,7 +30,7 @@ inline StrongTaggedValue::StrongTaggedValue(Object o)
 
 Object StrongTaggedValue::ToObject(Isolate* isolate, StrongTaggedValue object) {
 #ifdef V8_COMPRESS_POINTERS
-  return Object(CompressionScheme::DecompressTaggedAny(isolate, object.ptr()));
+  return Object(CompressionScheme::DecompressTagged(isolate, object.ptr()));
 #else
   return Object(object.ptr());
 #endif

@@ -49,7 +49,7 @@ inline TaggedValue::TaggedValue(MaybeObject o)
 
 MaybeObject TaggedValue::ToMaybeObject(Isolate* isolate, TaggedValue object) {
 #ifdef V8_COMPRESS_POINTERS
   return MaybeObject(
-      CompressionScheme::DecompressTaggedAny(isolate, object.ptr()));
+      CompressionScheme::DecompressTagged(isolate, object.ptr()));
 #else
   return MaybeObject(object.ptr());
 #endif

@@ -125,7 +125,7 @@ void ReadOnlyRoots::VerifyNameForProtectorsPages() const {
 
 Address ReadOnlyRoots::at(RootIndex root_index) const {
 #if V8_STATIC_ROOTS_BOOL
-  return V8HeapCompressionScheme::DecompressTaggedPointer(
+  return V8HeapCompressionScheme::DecompressTagged(
       V8HeapCompressionScheme::base(),
       StaticReadOnlyRootsPointerTable[static_cast<int>(root_index)]);
 #else

@@ -75,8 +75,7 @@ void ReadOnlyRoots::InitFromStaticRootsTable(Address cage_base) {
 #if V8_STATIC_ROOTS_BOOL
   RootIndex pos = RootIndex::kFirstReadOnlyRoot;
   for (auto element : StaticReadOnlyRootsPointerTable) {
-    auto ptr =
-        V8HeapCompressionScheme::DecompressTaggedPointer(cage_base, element);
+    auto ptr = V8HeapCompressionScheme::DecompressTagged(cage_base, element);
     DCHECK(!is_initialized(pos));
     read_only_roots_[static_cast<size_t>(pos)] = ptr;
     ++pos;
@@ -438,7 +438,7 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
                                                      Register instance,
                                                      int offset) {
   DCHECK_LE(0, offset);
-  LoadTaggedPointerField(dst, MemOperand{instance, offset});
+  LoadTaggedField(dst, MemOperand{instance, offset});
 }
 
 void LiftoffAssembler::LoadExternalPointer(Register dst, Register instance,

@@ -461,7 +461,7 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
   unsigned shift_amount = !needs_shift ? 0 : COMPRESS_POINTERS_BOOL ? 2 : 3;
   MemOperand src_op = liftoff::GetMemOp(this, &temps, src_addr, offset_reg,
                                         offset_imm, false, shift_amount);
-  LoadTaggedPointerField(dst, src_op);
+  LoadTaggedField(dst, src_op);
 }
 
 void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,

@@ -502,7 +502,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
   bind(&write_barrier);
   JumpIfSmi(src.gp(), &exit);
   if (COMPRESS_POINTERS_BOOL) {
-    DecompressTaggedPointer(src.gp(), src.gp());
+    DecompressTagged(src.gp(), src.gp());
   }
   CheckPageFlag(src.gp(),
                 MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,

@@ -266,7 +266,7 @@ void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
 void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
                                                      Register instance,
                                                      int offset) {
-  LoadTaggedPointerField(dst, MemOperand(instance, offset), r0);
+  LoadTaggedField(dst, MemOperand(instance, offset), r0);
 }
 
 void LiftoffAssembler::SpillInstance(Register instance) {

@@ -283,7 +283,7 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
     ShiftLeftU64(ip, offset_reg, Operand(shift_amount));
     offset_reg = ip;
   }
-  LoadTaggedPointerField(dst, MemOperand(src_addr, offset_reg, offset_imm), r0);
+  LoadTaggedField(dst, MemOperand(src_addr, offset_reg, offset_imm), r0);
 }
 
 void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,

@@ -310,7 +310,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
   bind(&write_barrier);
   JumpIfSmi(src.gp(), &exit);
   if (COMPRESS_POINTERS_BOOL) {
-    DecompressTaggedPointer(src.gp(), src.gp());
+    DecompressTagged(src.gp(), src.gp());
   }
   CheckPageFlag(src.gp(), ip,
                 MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,

@@ -196,7 +196,7 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
                                                      Register instance,
                                                      int offset) {
   DCHECK_LE(0, offset);
-  LoadTaggedPointerField(dst, MemOperand{instance, offset});
+  LoadTaggedField(dst, MemOperand{instance, offset});
 }
 
 void LiftoffAssembler::SpillInstance(Register instance) {

@@ -246,7 +246,7 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
                                                      Register instance,
                                                      int offset) {
   DCHECK_LE(0, offset);
-  LoadTaggedPointerField(dst, MemOperand(instance, offset));
+  LoadTaggedField(dst, MemOperand(instance, offset));
 }
 
 void LiftoffAssembler::SpillInstance(Register instance) {

@@ -264,7 +264,7 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
     ShiftLeftU64(ip, offset_reg, Operand(shift_amount));
     offset_reg = ip;
   }
-  LoadTaggedPointerField(
+  LoadTaggedField(
       dst,
       MemOperand(src_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
 }

@@ -295,7 +295,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
   bind(&write_barrier);
   JumpIfSmi(src.gp(), &exit);
   if (COMPRESS_POINTERS_BOOL) {
-    DecompressTaggedPointer(src.gp(), src.gp());
+    DecompressTagged(src.gp(), src.gp());
   }
   CheckPageFlag(src.gp(), r1,
                 MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,

@@ -352,7 +352,7 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
                                                      Register instance,
                                                      int offset) {
   DCHECK_LE(0, offset);
-  LoadTaggedPointerField(dst, Operand(instance, offset));
+  LoadTaggedField(dst, Operand(instance, offset));
 }
 
 void LiftoffAssembler::LoadExternalPointer(Register dst, Register instance,

@@ -381,7 +381,7 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
   Operand src_op =
       liftoff::GetMemOp(this, src_addr, offset_reg,
                         static_cast<uint32_t>(offset_imm), scale_factor);
-  LoadTaggedPointerField(dst, src_op);
+  LoadTaggedField(dst, src_op);
 }
 
 void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,

@@ -414,7 +414,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
   bind(&write_barrier);
   JumpIfSmi(src.gp(), &exit, Label::kNear);
   if (COMPRESS_POINTERS_BOOL) {
-    DecompressTaggedPointer(src.gp(), src.gp());
+    DecompressTagged(src.gp(), src.gp());
   }
   CheckPageFlag(src.gp(), scratch,
                 MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,

@@ -28,7 +28,7 @@ uintptr_t EnsureDecompressed(uintptr_t address,
   if (!COMPRESS_POINTERS_BOOL || !IsPointerCompressed(address)) return address;
   // TODO(v8:11880): ExternalCodeCompressionScheme might be needed here for
   // decompressing Code pointers from external code space.
-  return i::V8HeapCompressionScheme::DecompressTaggedAny(
+  return i::V8HeapCompressionScheme::DecompressTagged(
       any_uncompressed_ptr, static_cast<i::Tagged_t>(address));
 }

@@ -75,7 +75,7 @@ if (hasattr(v8heapconst, 'HEAP_FIRST_PAGES')): # Only exists in ptr-compr builds
   if (space_name in expected_spaces):
     out = out + '  if (heap_addresses->' + space_name + '_first_page == 0) {\n'
     out = out + '    heap_addresses->' + space_name + \
-        '_first_page = i::V8HeapCompressionScheme::DecompressTaggedPointer(' + \
+        '_first_page = i::V8HeapCompressionScheme::DecompressTagged(' + \
         'any_uncompressed_ptr, ' + str(offset) + ');\n'
     out = out + '  }\n'
   out = out + '}\n'