[ptrcomp] Remove the distinction of TaggedAny and TaggedPointer
Known-pointer decompression used to be distinct from any-tagged-value decompression, since the latter used to detect Smis and decompress them with sign extension. However, we got rid of this distinction when we introduced Smi-corrupting loads (allowing the top 32 bits of uncompressed Smis to be undefined), which means that TaggedPointer and TaggedAny decompression are now identical. We can remove a bunch of duplicate code by removing this distinction.

Change-Id: Id66671497d63ed885f9e537494c011317dfd4788
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4221398
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Toon Verwaest <verwaest@chromium.org>
Auto-Submit: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#85647}
This commit is contained in:
parent 1c162c83b1
commit 81aa89592b
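The effect of the change is easiest to see in a standalone sketch. The following is an illustrative model of the two decompression paths, not V8's actual implementation — the helper names, the Smi tag check, and the 4GB cage alignment are assumptions made for the example:

#include <cstdint>

using Address = uintptr_t;  // 64-bit targets only

// All objects in the compression cage share their high 32 bits, so any
// on-heap address recovers the cage base by masking (assumed 4GB cage).
Address GetCageBase(Address any_on_heap_addr) {
  constexpr Address kCageAlignment = Address{1} << 32;
  return any_on_heap_addr & ~(kCageAlignment - 1);
}

// Old TaggedAny path: detect compressed Smis (low tag bit 0, per V8's
// convention) and sign-extend them; pointers are rebased on the cage.
Address DecompressTaggedAnyOld(Address on_heap_addr, uint32_t value) {
  if ((value & 1) == 0) {  // Smi
    return static_cast<Address>(static_cast<int32_t>(value));
  }
  return GetCageBase(on_heap_addr) + value;
}

// New unified path: with Smi-corrupting loads the top 32 bits of an
// uncompressed Smi are allowed to be undefined, so the Smi branch and
// the sign extension disappear; Smis and pointers decompress identically.
Address DecompressTagged(Address on_heap_addr, uint32_t value) {
  return GetCageBase(on_heap_addr) + value;
}

Once both entry points compile to the branchless form, keeping two names only duplicates code — which is what this commit collapses.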
@@ -365,8 +365,7 @@ Local<Value> Context::GetEmbedderData(int index) {
 #ifdef V8_COMPRESS_POINTERS
   // We read the full pointer value and then decompress it in order to avoid
   // dealing with potential endiannes issues.
-  value =
-      I::DecompressTaggedAnyField(embedder_data, static_cast<uint32_t>(value));
+  value = I::DecompressTaggedField(embedder_data, static_cast<uint32_t>(value));
 #endif
   internal::Isolate* isolate = internal::IsolateFromNeverReadOnlySpaceObject(
       *reinterpret_cast<A*>(this));
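The endianness comment in the hunk above deserves a gloss: the 32-bit compressed payload sits at a byte offset within the pointer-sized slot that depends on the target's byte order. A sketch of why a full-width read sidesteps this (illustrative code, not the V8 header):

#include <cstdint>
#include <cstring>

// A direct 32-bit read of a 64-bit slot needs an endianness-dependent
// byte offset: 0 on little-endian, 4 on big-endian. Reading the whole
// slot and truncating in-register needs no such #ifdef.
uint32_t ReadCompressedPayload(const void* slot) {
  uint64_t full;
  std::memcpy(&full, slot, sizeof(full));  // full pointer-width read
  return static_cast<uint32_t>(full);      // keep the numerically-low half
}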
@@ -807,7 +807,7 @@ class Internals {
     return addr & -static_cast<intptr_t>(kPtrComprCageBaseAlignment);
   }
 
-  V8_INLINE static internal::Address DecompressTaggedAnyField(
+  V8_INLINE static internal::Address DecompressTaggedField(
       internal::Address heap_object_ptr, uint32_t value) {
     internal::Address base =
         GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
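As a concrete check of the masking line in the hunk above — assuming, for the example only, a 4GB cage so that kPtrComprCageBaseAlignment is 1 << 32 (the real constant lives in v8-internal.h):

#include <cstdint>

// -static_cast<int64_t>(1ULL << 32) is 0xFFFFFFFF00000000 in two's
// complement, so the mask keeps an address's high 32 bits: every object
// in the cage maps back to the same base.
constexpr uint64_t kAlignment = uint64_t{1} << 32;  // assumed 4GB cage
constexpr uint64_t kMask =
    static_cast<uint64_t>(-static_cast<int64_t>(kAlignment));
static_assert(kMask == 0xFFFF'FFFF'0000'0000ULL, "mask shape");
static_assert((0x0000'1234'5678'9ABCULL & kMask) == 0x0000'1234'0000'0000ULL,
              "any cage address recovers the cage base");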
@@ -717,7 +717,7 @@ Local<Value> Object::GetInternalField(int index) {
 #ifdef V8_COMPRESS_POINTERS
   // We read the full pointer value and then decompress it in order to avoid
   // dealing with potential endiannes issues.
-  value = I::DecompressTaggedAnyField(obj, static_cast<uint32_t>(value));
+  value = I::DecompressTaggedField(obj, static_cast<uint32_t>(value));
 #endif
   internal::Isolate* isolate =
       internal::IsolateFromNeverReadOnlySpaceObject(obj);
@@ -309,8 +309,8 @@ void BaselineAssembler::Pop(T... registers) {
   detail::PopAllHelper<T...>::Pop(this, registers...);
 }
 
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                         int offset) {
   __ ldr(output, FieldMemOperand(source, offset));
 }
 
@@ -326,11 +326,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   SmiUntag(output);
 }
 
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  __ ldr(output, FieldMemOperand(source, offset));
-}
-
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   __ ldrh(output, FieldMemOperand(source, offset));
@@ -372,8 +367,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                 Label* on_result,
                                                 Label::Distance) {
   Label fallthrough;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
+  LoadTaggedField(scratch_and_result, feedback_vector,
                   FeedbackVector::OffsetOfElementAt(slot.ToInt()));
   __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
 
   // Is it marked_for_deoptimization? If yes, clear the slot.
@@ -398,8 +393,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch();
   __ ldr(interrupt_budget,
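For readers unfamiliar with the object chain these budget helpers walk, here is a C++-level sketch of what the emitted code does; the struct layouts are illustrative stand-ins, not V8's real object layouts:

#include <cstdint>

struct FeedbackCell { int32_t interrupt_budget; };    // kInterruptBudgetOffset
struct JSFunction { FeedbackCell* feedback_cell; };   // kFeedbackCellOffset

// Mirrors AddToInterruptBudgetAndJumpIfNotExceeded: two tagged-field
// loads reach the budget, a raw int32 field, which is then adjusted.
bool AddToBudgetAndCheck(JSFunction* function, int32_t weight) {
  FeedbackCell* cell = function->feedback_cell;  // LoadTaggedField
  cell->interrupt_budget += weight;              // __ ldr / add / str
  // The generated code branches to skip_interrupt_label while the
  // budget has not been exhausted.
  return cell->interrupt_budget >= 0;
}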
@@ -421,8 +416,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch();
   __ ldr(interrupt_budget,
@@ -437,16 +432,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                  Context::OffsetOfElementAt(index));
 }
 
 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
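LdaContextSlot has the same shape on every architecture touched by this change: walk `depth` previous-context links, then load one slot. A sketch in plain C++ (illustrative types; in V8 the slots are stored inline, not behind a pointer):

#include <cstdint>

struct Context {
  Context* previous;  // Context::kPreviousOffset
  void** slots;       // Context::OffsetOfElementAt(index)
};

void* LdaContextSlotSketch(Context* context, uint32_t index, uint32_t depth) {
  for (; depth > 0; --depth) {
    context = context->previous;  // LoadTaggedField(context, context, ...)
  }
  return context->slots[index];   // LoadTaggedField(accumulator, context, ...)
}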
@@ -455,33 +450,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
 void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }
 
 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
 
   // The actual array index is (cell_index - 1).
   cell_index -= 1;
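The cell-index convention that LdaModuleVariable and StaModuleVariable decode is worth spelling out: positive indices are 1-based regular exports, non-positive indices are 1-based regular imports. A sketch with illustrative types, mirroring the comments kept in the diff:

struct SourceTextModuleSketch {
  void** regular_exports;  // kRegularExportsOffset
  void** regular_imports;  // kRegularImportsOffset
};

void* LookupModuleCell(const SourceTextModuleSketch& module, int cell_index) {
  if (cell_index > 0) {
    return module.regular_exports[cell_index - 1];  // e.g. 1 -> exports[0]
  }
  return module.regular_imports[-cell_index - 1];   // e.g. -1 -> imports[0]
}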
@@ -369,9 +369,9 @@ void BaselineAssembler::Pop(T... registers) {
   detail::PopAllHelper<T...>::Pop(this, registers...);
 }
 
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                         int offset) {
-  __ LoadTaggedPointerField(output, FieldMemOperand(source, offset));
+  __ LoadTaggedField(output, FieldMemOperand(source, offset));
 }
 
 void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
@@ -386,11 +386,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   SmiUntag(output);
 }
 
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  __ LoadAnyTaggedField(output, FieldMemOperand(source, offset));
-}
-
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   __ Ldrh(output, FieldMemOperand(source, offset));
@@ -440,8 +435,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch().W();
   __ Ldr(interrupt_budget,
@@ -463,8 +458,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch().W();
   __ Ldr(interrupt_budget,
@@ -479,16 +474,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                  Context::OffsetOfElementAt(index));
 }
 
 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
@@ -497,33 +492,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
 void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }
 
 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
 
   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@@ -114,13 +114,12 @@ void BaselineAssembler::SmiUntag(Register output, Register value) {
 
 void BaselineAssembler::LoadFixedArrayElement(Register output, Register array,
                                               int32_t index) {
-  LoadTaggedAnyField(output, array,
-                     FixedArray::kHeaderSize + index * kTaggedSize);
+  LoadTaggedField(output, array, FixedArray::kHeaderSize + index * kTaggedSize);
 }
 
 void BaselineAssembler::LoadPrototype(Register prototype, Register object) {
   __ LoadMap(prototype, object);
-  LoadTaggedPointerField(prototype, prototype, Map::kPrototypeOffset);
+  LoadTaggedField(prototype, prototype, Map::kPrototypeOffset);
 }
 void BaselineAssembler::LoadContext(Register output) {
   LoadRegister(output, interpreter::Register::current_context());
@@ -147,13 +147,11 @@ class BaselineAssembler {
   inline void TailCallBuiltin(Builtin builtin);
   inline void CallRuntime(Runtime::FunctionId function, int nargs);
 
-  inline void LoadTaggedPointerField(Register output, Register source,
-                                     int offset);
+  inline void LoadTaggedField(Register output, Register source, int offset);
   inline void LoadTaggedSignedField(Register output, Register source,
                                     int offset);
   inline void LoadTaggedSignedFieldAndUntag(Register output, Register source,
                                             int offset);
-  inline void LoadTaggedAnyField(Register output, Register source, int offset);
   inline void LoadWord16FieldZeroExtend(Register output, Register source,
                                         int offset);
   inline void LoadWord8Field(Register output, Register source, int offset);
@@ -170,16 +168,12 @@ class BaselineAssembler {
   // X64 supports complex addressing mode, pointer decompression can be done by
   // [%compressed_base + %r1 + K].
 #if V8_TARGET_ARCH_X64
-  inline void LoadTaggedPointerField(TaggedRegister output, Register source,
-                                     int offset);
+  inline void LoadTaggedField(TaggedRegister output, Register source,
+                              int offset);
-  inline void LoadTaggedPointerField(TaggedRegister output,
-                                     TaggedRegister source, int offset);
+  inline void LoadTaggedField(TaggedRegister output, TaggedRegister source,
+                              int offset);
-  inline void LoadTaggedPointerField(Register output, TaggedRegister source,
-                                     int offset);
+  inline void LoadTaggedField(Register output, TaggedRegister source,
+                              int offset);
-  inline void LoadTaggedAnyField(Register output, TaggedRegister source,
-                                 int offset);
-  inline void LoadTaggedAnyField(TaggedRegister output, TaggedRegister source,
-                                 int offset);
   inline void LoadFixedArrayElement(Register output, TaggedRegister array,
                                     int32_t index);
   inline void LoadFixedArrayElement(TaggedRegister output, TaggedRegister array,
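The X64-only TaggedRegister overloads exist because of the comment kept in the hunk above: with a complex addressing mode, the cage-base add never has to materialize. A rough C++ rendering of the [%compressed_base + %r1 + K] pattern — illustrative only; the real lowering happens in the macro-assembler:

#include <cstdint>

using Address = uintptr_t;

// On x64 a single mov can compute base + index + displacement, so a
// compressed value held in a register can be used directly as the index
// of the next load instead of being decompressed into a full pointer first.
Address LoadThroughCompressed(Address cage_base, uint32_t compressed, int k) {
  // one instruction: mov rax, [cage_base + compressed + K]
  return *reinterpret_cast<const Address*>(cage_base + compressed + k);
}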
@@ -439,8 +439,8 @@ void BaselineCompiler::LoadFeedbackVector(Register output) {
 
 void BaselineCompiler::LoadClosureFeedbackArray(Register output) {
   LoadFeedbackVector(output);
-  __ LoadTaggedPointerField(output, output,
+  __ LoadTaggedField(output, output,
                      FeedbackVector::kClosureFeedbackCellArrayOffset);
 }
 
 void BaselineCompiler::SelectBooleanConstant(
@@ -754,8 +754,8 @@ void BaselineCompiler::VisitLdaCurrentContextSlot() {
   BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
   Register context = scratch_scope.AcquireScratch();
   __ LoadContext(context);
-  __ LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+  __ LoadTaggedField(kInterpreterAccumulatorRegister, context,
                      Context::OffsetOfElementAt(Index(0)));
 }
 
 void BaselineCompiler::VisitLdaImmutableCurrentContextSlot() {
@@ -1350,9 +1350,9 @@ void BaselineCompiler::VisitIntrinsicCreateJSGeneratorObject(
 void BaselineCompiler::VisitIntrinsicGeneratorGetResumeMode(
     interpreter::RegisterList args) {
   __ LoadRegister(kInterpreterAccumulatorRegister, args[0]);
-  __ LoadTaggedAnyField(kInterpreterAccumulatorRegister,
+  __ LoadTaggedField(kInterpreterAccumulatorRegister,
                      kInterpreterAccumulatorRegister,
                      JSGeneratorObject::kResumeModeOffset);
 }
 
 void BaselineCompiler::VisitIntrinsicGeneratorClose(
@@ -2211,8 +2211,8 @@ void BaselineCompiler::VisitSwitchOnGeneratorState() {
                       Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
 
   Register context = scratch_scope.AcquireScratch();
-  __ LoadTaggedAnyField(context, generator_object,
+  __ LoadTaggedField(context, generator_object,
                      JSGeneratorObject::kContextOffset);
   __ StoreContext(context);
 
   interpreter::JumpTableTargetOffsets offsets =
@@ -293,8 +293,8 @@ void BaselineAssembler::Pop(T... registers) {
   (__ Pop(registers), ...);
 }
 
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                         int offset) {
   __ mov(output, FieldOperand(source, offset));
 }
 
@@ -310,11 +310,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   SmiUntag(output);
 }
 
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  __ mov(output, FieldOperand(source, offset));
-}
-
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   __ movzx_w(output, FieldOperand(source, offset));
@@ -354,8 +349,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                 Label* on_result,
                                                 Label::Distance distance) {
   Label fallthrough;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
+  LoadTaggedField(scratch_and_result, feedback_vector,
                   FeedbackVector::OffsetOfElementAt(slot.ToInt()));
   __ LoadWeakValue(scratch_and_result, &fallthrough);
 
   // Is it marked_for_deoptimization? If yes, clear the slot.
@@ -378,8 +373,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);
   __ add(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
          Immediate(weight));
   if (skip_interrupt_label) {
@@ -395,8 +390,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   Register feedback_cell = scratch_scope.AcquireScratch();
   DCHECK(!AreAliased(feedback_cell, weight));
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);
   __ add(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
          weight);
   if (skip_interrupt_label) __ j(greater_equal, skip_interrupt_label);
@@ -405,16 +400,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                  Context::OffsetOfElementAt(index));
 }
 
 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
@@ -423,33 +418,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
 void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }
 
 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
 
   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@@ -296,8 +296,8 @@ void BaselineAssembler::Pop(T... registers) {
   detail::PopAllHelper<T...>::Pop(this, registers...);
 }
 
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                         int offset) {
   __ Ld_d(output, FieldMemOperand(source, offset));
 }
 void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
@@ -310,10 +310,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   LoadTaggedSignedField(output, source, offset);
   SmiUntag(output);
 }
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  __ Ld_d(output, FieldMemOperand(source, offset));
-}
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   __ Ld_hu(output, FieldMemOperand(source, offset));
@@ -350,8 +346,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                 Label* on_result,
                                                 Label::Distance) {
   Label fallthrough;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
+  LoadTaggedField(scratch_and_result, feedback_vector,
                   FeedbackVector::OffsetOfElementAt(slot.ToInt()));
   __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
   // Is it marked_for_deoptimization? If yes, clear the slot.
   {
@@ -374,8 +370,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch();
   __ Ld_w(interrupt_budget,
@@ -394,8 +390,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch();
   __ Ld_w(interrupt_budget,
@@ -410,16 +406,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                  Context::OffsetOfElementAt(index));
 }
 
 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
@@ -428,33 +424,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
 void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }
 
 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
 
   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@@ -304,8 +304,8 @@ void BaselineAssembler::Pop(T... registers) {
   detail::PopAllHelper<T...>::Pop(this, registers...);
 }
 
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                         int offset) {
   __ Ld(output, FieldMemOperand(source, offset));
 }
 void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
@@ -318,10 +318,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   LoadTaggedSignedField(output, source, offset);
   SmiUntag(output);
 }
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  __ Ld(output, FieldMemOperand(source, offset));
-}
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   __ Lhu(output, FieldMemOperand(source, offset));
@@ -360,8 +356,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                 Label* on_result,
                                                 Label::Distance) {
   Label fallthrough;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
+  LoadTaggedField(scratch_and_result, feedback_vector,
                   FeedbackVector::OffsetOfElementAt(slot.ToInt()));
   __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
   // Is it marked_for_deoptimization? If yes, clear the slot.
   {
@@ -384,8 +380,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch();
   __ Lw(interrupt_budget,
@@ -404,8 +400,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch();
   __ Lw(interrupt_budget,
@@ -420,16 +416,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                  Context::OffsetOfElementAt(index));
 }
 
 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
@@ -438,33 +434,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
 void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
  } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }
 
 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
 
   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@ -206,7 +206,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
|
|||||||
MemOperand operand, Label* target,
|
MemOperand operand, Label* target,
|
||||||
Label::Distance) {
|
Label::Distance) {
|
||||||
ASM_CODE_COMMENT(masm_);
|
ASM_CODE_COMMENT(masm_);
|
||||||
__ LoadTaggedPointerField(ip, operand, r0);
|
__ LoadTaggedField(ip, operand, r0);
|
||||||
JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, value, ip, target);
|
JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, value, ip, target);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -214,7 +214,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
|
|||||||
Register value, Label* target,
|
Register value, Label* target,
|
||||||
Label::Distance) {
|
Label::Distance) {
|
||||||
ASM_CODE_COMMENT(masm_);
|
ASM_CODE_COMMENT(masm_);
|
||||||
__ LoadTaggedPointerField(ip, operand, r0);
|
__ LoadTaggedField(ip, operand, r0);
|
||||||
JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, value, ip, target);
|
JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, value, ip, target);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -374,10 +374,10 @@ void BaselineAssembler::Pop(T... registers) {
|
|||||||
detail::PopAllHelper<T...>::Pop(this, registers...);
|
detail::PopAllHelper<T...>::Pop(this, registers...);
|
||||||
}
|
}
|
||||||
|
|
||||||
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
|
void BaselineAssembler::LoadTaggedField(Register output, Register source,
|
||||||
int offset) {
|
int offset) {
|
||||||
ASM_CODE_COMMENT(masm_);
|
ASM_CODE_COMMENT(masm_);
|
||||||
__ LoadTaggedPointerField(output, FieldMemOperand(source, offset), r0);
|
__ LoadTaggedField(output, FieldMemOperand(source, offset), r0);
|
||||||
}
|
}
|
||||||
|
|
||||||
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
|
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
|
||||||
@ -393,12 +393,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
|
|||||||
SmiUntag(output);
|
SmiUntag(output);
|
||||||
}
|
}
|
||||||
|
|
||||||
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
|
|
||||||
int offset) {
|
|
||||||
ASM_CODE_COMMENT(masm_);
|
|
||||||
__ LoadAnyTaggedField(output, FieldMemOperand(source, offset), r0);
|
|
||||||
}
|
|
||||||
|
|
||||||
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
|
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
|
||||||
Register source, int offset) {
|
Register source, int offset) {
|
||||||
ASM_CODE_COMMENT(masm_);
|
ASM_CODE_COMMENT(masm_);
|
||||||
@ -443,8 +437,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
|
|||||||
Label* on_result,
|
Label* on_result,
|
||||||
Label::Distance) {
|
Label::Distance) {
|
||||||
Label fallthrough;
|
Label fallthrough;
|
||||||
LoadTaggedPointerField(scratch_and_result, feedback_vector,
|
LoadTaggedField(scratch_and_result, feedback_vector,
|
||||||
FeedbackVector::OffsetOfElementAt(slot.ToInt()));
|
FeedbackVector::OffsetOfElementAt(slot.ToInt()));
|
||||||
__ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
|
__ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
|
||||||
|
|
||||||
// Is it marked_for_deoptimization? If yes, clear the slot.
|
// Is it marked_for_deoptimization? If yes, clear the slot.
|
||||||
@ -469,8 +463,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
|
|||||||
ScratchRegisterScope scratch_scope(this);
|
ScratchRegisterScope scratch_scope(this);
|
||||||
Register feedback_cell = scratch_scope.AcquireScratch();
|
Register feedback_cell = scratch_scope.AcquireScratch();
|
||||||
LoadFunction(feedback_cell);
|
LoadFunction(feedback_cell);
|
||||||
LoadTaggedPointerField(feedback_cell, feedback_cell,
|
LoadTaggedField(feedback_cell, feedback_cell,
|
||||||
JSFunction::kFeedbackCellOffset);
|
JSFunction::kFeedbackCellOffset);
|
||||||
|
|
||||||
Register interrupt_budget = scratch_scope.AcquireScratch();
|
Register interrupt_budget = scratch_scope.AcquireScratch();
|
||||||
__ LoadU32(
|
__ LoadU32(
|
||||||
@ -494,8 +488,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
|
|||||||
ScratchRegisterScope scratch_scope(this);
|
ScratchRegisterScope scratch_scope(this);
|
||||||
Register feedback_cell = scratch_scope.AcquireScratch();
|
Register feedback_cell = scratch_scope.AcquireScratch();
|
||||||
LoadFunction(feedback_cell);
|
LoadFunction(feedback_cell);
|
||||||
LoadTaggedPointerField(feedback_cell, feedback_cell,
|
LoadTaggedField(feedback_cell, feedback_cell,
|
||||||
JSFunction::kFeedbackCellOffset);
|
JSFunction::kFeedbackCellOffset);
|
||||||
|
|
||||||
Register interrupt_budget = scratch_scope.AcquireScratch();
|
Register interrupt_budget = scratch_scope.AcquireScratch();
|
||||||
__ LoadU32(
|
__ LoadU32(
|
||||||
@ -513,17 +507,17 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
|
|||||||
uint32_t depth) {
|
uint32_t depth) {
|
||||||
ASM_CODE_COMMENT(masm_);
|
ASM_CODE_COMMENT(masm_);
|
||||||
for (; depth > 0; --depth) {
|
for (; depth > 0; --depth) {
|
||||||
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
|
LoadTaggedField(context, context, Context::kPreviousOffset);
|
||||||
}
|
}
|
||||||
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
|
LoadTaggedField(kInterpreterAccumulatorRegister, context,
|
||||||
Context::OffsetOfElementAt(index));
|
Context::OffsetOfElementAt(index));
|
||||||
}
|
}
|
||||||
|
|
||||||
void BaselineAssembler::StaContextSlot(Register context, Register value,
|
void BaselineAssembler::StaContextSlot(Register context, Register value,
|
||||||
uint32_t index, uint32_t depth) {
|
uint32_t index, uint32_t depth) {
|
||||||
ASM_CODE_COMMENT(masm_);
|
ASM_CODE_COMMENT(masm_);
|
||||||
for (; depth > 0; --depth) {
|
for (; depth > 0; --depth) {
|
||||||
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
|
LoadTaggedField(context, context, Context::kPreviousOffset);
|
||||||
}
|
}
|
||||||
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
|
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
|
||||||
value);
|
value);
|
||||||
@@ -533,34 +527,30 @@ void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   ASM_CODE_COMMENT(masm_);
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }
 
 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   ASM_CODE_COMMENT(masm_);
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
 
   // The actual array index is (cell_index - 1).
   cell_index -= 1;
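Note (illustrative, not part of this change): the cell_index arithmetic above encodes which module table a variable lives in. Positive indices select regular exports and negative ones regular imports, both one-based so that zero is never a valid cell. A sketch of the mapping, with a hypothetical helper name:

// Hypothetical helper: converts a module variable's cell_index into the
// zero-based element index within regular_exports or regular_imports.
int ModuleCellToArrayIndex(int cell_index, bool* is_export) {
  *is_export = cell_index > 0;
  // Exports:  1,  2,  3, ... map to 0, 1, 2, ...
  // Imports: -1, -2, -3, ... map to 0, 1, 2, ...
  return *is_export ? cell_index - 1 : -cell_index - 1;
}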
@@ -297,9 +297,9 @@ void BaselineAssembler::Pop(T... registers) {
   detail::PopAllHelper<T...>::Pop(this, registers...);
 }
 
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                         int offset) {
-  __ LoadTaggedPointerField(output, FieldMemOperand(source, offset));
+  __ LoadTaggedField(output, FieldMemOperand(source, offset));
 }
 void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
                                               int offset) {
@@ -311,10 +311,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   LoadTaggedSignedField(output, source, offset);
   SmiUntag(output);
 }
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  __ LoadAnyTaggedField(output, FieldMemOperand(source, offset));
-}
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   __ Lhu(output, FieldMemOperand(source, offset));
@@ -351,8 +347,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                 Label* on_result,
                                                 Label::Distance) {
   Label fallthrough, clear_slot;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
+  LoadTaggedField(scratch_and_result, feedback_vector,
                          FeedbackVector::OffsetOfElementAt(slot.ToInt()));
   __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
 
   // Is it marked_for_deoptimization? If yes, clear the slot.
@@ -379,8 +375,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                          JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch();
   __ Lw(interrupt_budget,
@@ -401,8 +397,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                          JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch();
   __ Lw(interrupt_budget,
@@ -419,16 +415,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                      Context::OffsetOfElementAt(index));
 }
 
 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
@@ -437,33 +433,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
 void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }
 
 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
 
   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@@ -211,9 +211,9 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
   if (COMPRESS_POINTERS_BOOL) {
     MemOperand addr =
         MemOperand(operand.rx(), operand.rb(), operand.offset() + stack_bias);
-    __ LoadTaggedPointerField(ip, addr, r0);
+    __ LoadTaggedField(ip, addr, r0);
   } else {
-    __ LoadTaggedPointerField(ip, operand, r0);
+    __ LoadTaggedField(ip, operand, r0);
   }
   JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, value, ip, target);
 }
@@ -226,9 +226,9 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
   if (COMPRESS_POINTERS_BOOL) {
     MemOperand addr =
         MemOperand(operand.rx(), operand.rb(), operand.offset() + stack_bias);
-    __ LoadTaggedPointerField(ip, addr, r0);
+    __ LoadTaggedField(ip, addr, r0);
   } else {
-    __ LoadTaggedPointerField(ip, operand, r0);
+    __ LoadTaggedField(ip, operand, r0);
   }
   JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, ip, value, target);
 }
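Note (illustrative, not part of this change): JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64> above picks the comparison width. For the equality case the intuition is that, within one pointer-compression cage, two tagged values are equal exactly when their low 32 bits are, so a 32-bit compare suffices. A hedged sketch of that equality check only (not V8's actual helper, which also handles other conditions):

#include <cstdint>

// Sketch: tagged-value equality can be decided on the compressed
// (low 32-bit) halves alone when pointer compression is enabled.
bool TaggedEqual(uint64_t a, uint64_t b, bool compress_pointers) {
  if (compress_pointers) {
    return static_cast<uint32_t>(a) == static_cast<uint32_t>(b);
  }
  return a == b;
}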
@@ -387,10 +387,10 @@ void BaselineAssembler::Pop(T... registers) {
   detail::PopAllHelper<T...>::Pop(this, registers...);
 }
 
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                         int offset) {
   ASM_CODE_COMMENT(masm_);
-  __ LoadTaggedPointerField(output, FieldMemOperand(source, offset), r0);
+  __ LoadTaggedField(output, FieldMemOperand(source, offset), r0);
 }
 
 void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
@@ -406,10 +406,10 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   SmiUntag(output);
 }
 
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                            int offset) {
   ASM_CODE_COMMENT(masm_);
-  __ LoadAnyTaggedField(output, FieldMemOperand(source, offset), r0);
+  __ LoadTaggedField(output, FieldMemOperand(source, offset), r0);
 }
 
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
@@ -456,8 +456,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                 Label* on_result,
                                                 Label::Distance) {
   Label fallthrough;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
+  LoadTaggedField(scratch_and_result, feedback_vector,
                          FeedbackVector::OffsetOfElementAt(slot.ToInt()));
   __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
 
   // Is it marked_for_deoptimization? If yes, clear the slot.
@@ -482,8 +482,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                          JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch();
   __ LoadU32(
@@ -507,8 +507,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                          JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch();
   __ LoadU32(
@@ -525,16 +525,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                      Context::OffsetOfElementAt(index));
 }
 
 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
@@ -543,33 +543,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
 void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }
 
 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
 
   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@@ -287,9 +287,9 @@ void BaselineAssembler::Pop(T... registers) {
   (__ Pop(registers), ...);
 }
 
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                         int offset) {
-  __ LoadTaggedPointerField(output, FieldOperand(source, offset));
+  __ LoadTaggedField(output, FieldOperand(source, offset));
 }
 void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
                                               int offset) {
@@ -300,10 +300,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
                                                       int offset) {
   __ SmiUntagField(output, FieldOperand(source, offset));
 }
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  __ LoadAnyTaggedField(output, FieldOperand(source, offset));
-}
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   __ movzxwq(output, FieldOperand(source, offset));
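Note (illustrative, not part of this change): the x64 hunk below adds TaggedRegister overloads. As the "complex addressing mode" comments further on suggest, a compressed tagged value need not be eagerly decompressed; the cage base, the 32-bit compressed value, and the field offset can be folded into a single address computation. A rough model of that arithmetic (names assumed for illustration, not V8 API):

#include <cstdint>

// Effective address of a field accessed through a compressed tagged value,
// with decompression folded into the addressing itself.
uint64_t FieldAddress(uint64_t cage_base, uint32_t compressed_tagged,
                      int32_t field_offset) {
  const int kHeapObjectTag = 1;  // tagged heap pointers are off by one byte
  return cage_base + compressed_tagged + field_offset - kHeapObjectTag;
}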
@@ -331,45 +327,31 @@ void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
   __ StoreTaggedField(FieldOperand(target, offset), value);
 }
 
-void BaselineAssembler::LoadTaggedPointerField(TaggedRegister output,
-                                               Register source, int offset) {
-  __ LoadTaggedPointerField(output, FieldOperand(source, offset));
+void BaselineAssembler::LoadTaggedField(TaggedRegister output, Register source,
+                                        int offset) {
+  __ LoadTaggedField(output, FieldOperand(source, offset));
 }
 
-void BaselineAssembler::LoadTaggedPointerField(TaggedRegister output,
-                                               TaggedRegister source,
-                                               int offset) {
-  __ LoadTaggedPointerField(output, FieldOperand(source, offset));
+void BaselineAssembler::LoadTaggedField(TaggedRegister output,
+                                        TaggedRegister source, int offset) {
+  __ LoadTaggedField(output, FieldOperand(source, offset));
 }
 
-void BaselineAssembler::LoadTaggedPointerField(Register output,
-                                               TaggedRegister source,
-                                               int offset) {
-  __ LoadTaggedPointerField(output, FieldOperand(source, offset));
-}
-
-void BaselineAssembler::LoadTaggedAnyField(Register output,
-                                           TaggedRegister source, int offset) {
-  __ LoadAnyTaggedField(output, FieldOperand(source, offset));
-}
-
-void BaselineAssembler::LoadTaggedAnyField(TaggedRegister output,
-                                           TaggedRegister source, int offset) {
-  __ LoadAnyTaggedField(output, FieldOperand(source, offset));
+void BaselineAssembler::LoadTaggedField(Register output, TaggedRegister source,
+                                        int offset) {
+  __ LoadTaggedField(output, FieldOperand(source, offset));
 }
 
 void BaselineAssembler::LoadFixedArrayElement(Register output,
                                               TaggedRegister array,
                                               int32_t index) {
-  LoadTaggedAnyField(output, array,
-                     FixedArray::kHeaderSize + index * kTaggedSize);
+  LoadTaggedField(output, array, FixedArray::kHeaderSize + index * kTaggedSize);
 }
 
 void BaselineAssembler::LoadFixedArrayElement(TaggedRegister output,
                                               TaggedRegister array,
                                               int32_t index) {
-  LoadTaggedAnyField(output, array,
-                     FixedArray::kHeaderSize + index * kTaggedSize);
+  LoadTaggedField(output, array, FixedArray::kHeaderSize + index * kTaggedSize);
 }
 
 void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
@@ -389,8 +371,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   LoadFunction(feedback_cell);
   // Decompresses pointer by complex addressing mode when necessary.
   TaggedRegister tagged(feedback_cell);
-  LoadTaggedPointerField(tagged, feedback_cell,
-                         JSFunction::kFeedbackCellOffset);
+  LoadTaggedField(tagged, feedback_cell, JSFunction::kFeedbackCellOffset);
   __ addl(FieldOperand(tagged, FeedbackCell::kInterruptBudgetOffset),
           Immediate(weight));
   if (skip_interrupt_label) {
@@ -407,8 +388,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   LoadFunction(feedback_cell);
   // Decompresses pointer by complex addressing mode when necessary.
   TaggedRegister tagged(feedback_cell);
-  LoadTaggedPointerField(tagged, feedback_cell,
-                         JSFunction::kFeedbackCellOffset);
+  LoadTaggedField(tagged, feedback_cell, JSFunction::kFeedbackCellOffset);
   __ addl(FieldOperand(tagged, FeedbackCell::kInterruptBudgetOffset), weight);
   if (skip_interrupt_label) __ j(greater_equal, skip_interrupt_label);
 }
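Note (illustrative, not part of this change): both hunks above update the feedback cell's interrupt budget. A weight is added, and the interrupt path is skipped while the result stays non-negative, which is what j(greater_equal, skip_interrupt_label) encodes after the addl. A behavioral sketch:

#include <cstdint>

// Returns true when the interrupt can be skipped, mirroring the
// addl + j(greater_equal, skip_interrupt_label) sequence above.
bool AddToBudgetAndCheck(int32_t* interrupt_budget, int32_t weight) {
  *interrupt_budget += weight;
  return *interrupt_budget >= 0;
}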
@@ -420,17 +400,17 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
   // addressing mode, any intermediate context pointer is loaded in compressed
   // form.
   if (depth == 0) {
-    LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+    LoadTaggedField(kInterpreterAccumulatorRegister, context,
                        Context::OffsetOfElementAt(index));
   } else {
     TaggedRegister tagged(context);
-    LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
+    LoadTaggedField(tagged, context, Context::kPreviousOffset);
     --depth;
     for (; depth > 0; --depth) {
-      LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
+      LoadTaggedField(tagged, tagged, Context::kPreviousOffset);
     }
-    LoadTaggedAnyField(kInterpreterAccumulatorRegister, tagged,
+    LoadTaggedField(kInterpreterAccumulatorRegister, tagged,
                        Context::OffsetOfElementAt(index));
   }
 }
 
@@ -442,10 +422,10 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
   // form.
   if (depth > 0) {
     TaggedRegister tagged(context);
-    LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
+    LoadTaggedField(tagged, context, Context::kPreviousOffset);
     --depth;
     for (; depth > 0; --depth) {
-      LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
+      LoadTaggedField(tagged, tagged, Context::kPreviousOffset);
     }
     if (COMPRESS_POINTERS_BOOL) {
       // Decompress tagged pointer.
@@ -463,29 +443,26 @@ void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
   // enabled, any intermediate context pointer is loaded in compressed form.
   TaggedRegister tagged(context);
   if (depth == 0) {
-    LoadTaggedPointerField(tagged, context, Context::kExtensionOffset);
+    LoadTaggedField(tagged, context, Context::kExtensionOffset);
   } else {
-    LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
+    LoadTaggedField(tagged, context, Context::kPreviousOffset);
     --depth;
     for (; depth > 0; --depth) {
-      LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
+      LoadTaggedField(tagged, tagged, Context::kPreviousOffset);
     }
-    LoadTaggedPointerField(tagged, tagged, Context::kExtensionOffset);
+    LoadTaggedField(tagged, tagged, Context::kExtensionOffset);
   }
   if (cell_index > 0) {
-    LoadTaggedPointerField(tagged, tagged,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(tagged, tagged, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(tagged, tagged,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(tagged, tagged, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(tagged, tagged, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, tagged,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, tagged, Cell::kValueOffset);
 }
 
 void BaselineAssembler::StaModuleVariable(Register context, Register value,
@@ -495,17 +472,16 @@ void BaselineAssembler::StaModuleVariable(Register context, Register value,
   // enabled, any intermediate context pointer is loaded in compressed form.
   TaggedRegister tagged(context);
   if (depth == 0) {
-    LoadTaggedPointerField(tagged, context, Context::kExtensionOffset);
+    LoadTaggedField(tagged, context, Context::kExtensionOffset);
   } else {
-    LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
+    LoadTaggedField(tagged, context, Context::kPreviousOffset);
     --depth;
     for (; depth > 0; --depth) {
-      LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
+      LoadTaggedField(tagged, tagged, Context::kPreviousOffset);
     }
-    LoadTaggedPointerField(tagged, tagged, Context::kExtensionOffset);
+    LoadTaggedField(tagged, tagged, Context::kExtensionOffset);
   }
-  LoadTaggedPointerField(tagged, tagged,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(tagged, tagged, SourceTextModule::kRegularExportsOffset);
 
   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@@ -213,7 +213,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   //  -- sp[4*kSystemPointerSize]: context (pushed by FrameScope)
   // -----------------------------------
 
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
   __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
   __ DecodeField<SharedFunctionInfo::FunctionKindBits>(w4);
@@ -423,7 +423,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
   }
   __ Cmp(scratch1, INTERPRETER_DATA_TYPE);
   __ B(ne, &done);
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       sfi_data,
       FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
   __ Bind(&done);
@@ -446,10 +446,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   __ AssertGeneratorObject(x1);
 
   // Load suspended function and context.
-  __ LoadTaggedPointerField(
-      x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
-  __ LoadTaggedPointerField(cp,
-                            FieldMemOperand(x4, JSFunction::kContextOffset));
+  __ LoadTaggedField(x4,
+                     FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+  __ LoadTaggedField(cp, FieldMemOperand(x4, JSFunction::kContextOffset));
 
   // Flood function if we are stepping.
   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -477,7 +476,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   __ B(lo, &stack_overflow);
 
   // Get number of arguments for generator function.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
   __ Ldrh(w10, FieldMemOperand(
                    x10, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -493,8 +492,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   __ Poke(padreg, Operand(x11, LSL, kSystemPointerSizeLog2));
 
   // Poke receiver into highest claimed slot.
-  __ LoadTaggedPointerField(
-      x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
+  __ LoadTaggedField(x5,
+                     FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
   __ Poke(x5, __ ReceiverOperand(x10));
 
   // ----------- S t a t e -------------
@@ -507,7 +506,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // -----------------------------------
 
   // Copy the function arguments from the generator object's register file.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       x5,
       FieldMemOperand(x1, JSGeneratorObject::kParametersAndRegistersOffset));
   {
@@ -518,7 +517,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Add(x5, x5, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
     __ Bind(&loop);
     __ Sub(x10, x10, 1);
-    __ LoadAnyTaggedField(x11, MemOperand(x5, -kTaggedSize, PreIndex));
+    __ LoadTaggedField(x11, MemOperand(x5, -kTaggedSize, PreIndex));
     __ Str(x11, MemOperand(x12, -kSystemPointerSize, PostIndex));
     __ Cbnz(x10, &loop);
     __ Bind(&done);
@@ -527,9 +526,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // Underlying function needs to have bytecode available.
   if (v8_flags.debug_code) {
     Label is_baseline;
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
        x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
     GetSharedFunctionInfoBytecodeOrBaseline(masm, x3, x0, &is_baseline);
     __ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
@@ -539,7 +538,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
 
   // Resume (Ignition/TurboFan) generator object.
   {
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
     __ Ldrh(w0, FieldMemOperand(
                     x0, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -549,7 +548,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Mov(x3, x1);
     __ Mov(x1, x4);
     static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
-    __ LoadTaggedPointerField(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
+    __ LoadTaggedField(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
     __ JumpCodeObject(x2);
   }
 
@@ -561,8 +560,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Push(x1, padreg, x4, x5);
     __ CallRuntime(Runtime::kDebugOnFunctionCall);
     __ Pop(padreg, x1);
-    __ LoadTaggedPointerField(
-        x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+    __ LoadTaggedField(x4,
+                       FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
   }
   __ B(&stepping_prepared);
 
@@ -572,8 +571,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Push(x1, padreg);
     __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
     __ Pop(padreg, x1);
-    __ LoadTaggedPointerField(
-        x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+    __ LoadTaggedField(x4,
+                       FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
   }
   __ B(&stepping_prepared);
 
@@ -1108,11 +1107,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
       BaselineOutOfLinePrologueDescriptor::kClosure);
   // Load the feedback vector from the closure.
   Register feedback_vector = temps.AcquireX();
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset));
   __ AssertFeedbackVector(feedback_vector, x4);
 
   // Check the tiering state.
@@ -1270,9 +1268,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(
 
   // Get the bytecode array from the function object and load it into
   // kInterpreterBytecodeArrayRegister.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       x4, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       kInterpreterBytecodeArrayRegister,
       FieldMemOperand(x4, SharedFunctionInfo::kFunctionDataOffset));
 
@@ -1288,17 +1286,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   __ B(ne, &compile_lazy);
 
   // Load the feedback vector from the closure.
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset));
 
   Label push_stack_frame;
   // Check if feedback vector is valid. If valid, check for optimized code
   // and update invocation count. Otherwise, setup the stack frame.
-  __ LoadTaggedPointerField(
-      x7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+  __ LoadTaggedField(x7,
+                     FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
   __ Ldrh(x7, FieldMemOperand(x7, Map::kInstanceTypeOffset));
   __ Cmp(x7, FEEDBACK_VECTOR_TYPE);
   __ B(ne, &push_stack_frame);
@@ -1480,16 +1477,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   __ bind(&is_baseline);
   {
     // Load the feedback vector from the closure.
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         feedback_vector,
         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-    __ LoadTaggedPointerField(
-        feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+    __ LoadTaggedField(feedback_vector,
+                       FieldMemOperand(feedback_vector, Cell::kValueOffset));
 
     Label install_baseline_code;
     // Check if feedback vector is valid. If not, call prepare for baseline to
     // allocate it.
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         x7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
     __ Ldrh(x7, FieldMemOperand(x7, Map::kInstanceTypeOffset));
     __ Cmp(x7, FEEDBACK_VECTOR_TYPE);
@@ -1732,16 +1729,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // get the custom trampoline, otherwise grab the entry address of the global
   // trampoline.
   __ Ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       x1, FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
   __ CompareObjectType(x1, kInterpreterDispatchTableRegister,
                        kInterpreterDispatchTableRegister,
                        INTERPRETER_DATA_TYPE);
   __ B(ne, &builtin_trampoline);
 
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      x1, FieldMemOperand(x1, InterpreterData::kInterpreterTrampolineOffset));
   __ LoadCodeEntry(x1, x1);
   __ B(&trampoline_loaded);
@@ -1997,7 +1994,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
 
   // Load deoptimization data from the code object.
   // <deopt_data> = <code>[#deoptimization_data_offset]
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       x1,
       FieldMemOperand(
           x0, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));
@@ -2348,7 +2345,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
   // Allow x2 to be a FixedArray, or a FixedDoubleArray if x4 == 0.
   Label ok, fail;
   __ AssertNotSmi(x2, AbortReason::kOperandIsNotAFixedArray);
-  __ LoadTaggedPointerField(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
+  __ LoadTaggedField(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
   __ Ldrh(x13, FieldMemOperand(x10, Map::kInstanceTypeOffset));
   __ Cmp(x13, FIXED_ARRAY_TYPE);
   __ B(eq, &ok);
@@ -2394,7 +2391,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
     __ Add(argc, argc, len);  // Update new argc.
     __ Bind(&loop);
     __ Sub(len, len, 1);
-    __ LoadAnyTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex));
+    __ LoadTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex));
     __ CmpTagged(scratch, the_hole_value);
     __ Csel(scratch, scratch, undefined_value, ne);
     __ Str(scratch, MemOperand(dst, kSystemPointerSize, PostIndex));
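Note (illustrative, not part of this change): in the CallOrConstructVarargs loop above, every element loaded from the spread source is compared against the_hole and replaced by undefined before being stored, so holes in a holey array never leak as call arguments. A compact model of the CmpTagged plus Csel pair:

#include <cstdint>

// Select undefined for holes; mirrors CmpTagged + Csel in the loop above.
uint64_t SanitizeSpreadElement(uint64_t value, uint64_t the_hole,
                               uint64_t undefined_value) {
  return value == the_hole ? undefined_value : value;
}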
@ -2426,7 +2423,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
|
|||||||
if (mode == CallOrConstructMode::kConstruct) {
|
if (mode == CallOrConstructMode::kConstruct) {
|
||||||
Label new_target_constructor, new_target_not_constructor;
|
Label new_target_constructor, new_target_not_constructor;
|
||||||
__ JumpIfSmi(x3, &new_target_not_constructor);
|
__ JumpIfSmi(x3, &new_target_not_constructor);
|
||||||
__ LoadTaggedPointerField(x5, FieldMemOperand(x3, HeapObject::kMapOffset));
|
__ LoadTaggedField(x5, FieldMemOperand(x3, HeapObject::kMapOffset));
|
||||||
__ Ldrb(x5, FieldMemOperand(x5, Map::kBitFieldOffset));
|
__ Ldrb(x5, FieldMemOperand(x5, Map::kBitFieldOffset));
|
||||||
__ TestAndBranchIfAnySet(x5, Map::Bits1::IsConstructorBit::kMask,
|
__ TestAndBranchIfAnySet(x5, Map::Bits1::IsConstructorBit::kMask,
|
||||||
&new_target_constructor);
|
&new_target_constructor);
|
||||||
@ -2486,14 +2483,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
|
|||||||
// -----------------------------------
|
// -----------------------------------
|
||||||
__ AssertCallableFunction(x1);
|
__ AssertCallableFunction(x1);
|
||||||
|
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
|
x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
|
||||||
|
|
||||||
// Enter the context of the function; ToObject has to run in the function
|
// Enter the context of the function; ToObject has to run in the function
|
||||||
// context, and we also need to take the global proxy from the function
|
// context, and we also need to take the global proxy from the function
|
||||||
// context in case of conversion.
|
// context in case of conversion.
|
||||||
__ LoadTaggedPointerField(cp,
|
__ LoadTaggedField(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
|
||||||
FieldMemOperand(x1, JSFunction::kContextOffset));
|
|
||||||
// We need to convert the receiver for non-native sloppy mode functions.
|
// We need to convert the receiver for non-native sloppy mode functions.
|
||||||
Label done_convert;
|
Label done_convert;
|
||||||
__ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kFlagsOffset));
|
__ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kFlagsOffset));
|
||||||
@ -2545,7 +2541,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
|
|||||||
__ Pop(cp, x1, x0, padreg);
|
__ Pop(cp, x1, x0, padreg);
|
||||||
__ SmiUntag(x0);
|
__ SmiUntag(x0);
|
||||||
}
|
}
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
|
x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
|
||||||
__ Bind(&convert_receiver);
|
__ Bind(&convert_receiver);
|
||||||
}
|
}
|
||||||
@ -2579,7 +2575,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
|
|||||||
|
|
||||||
// Load [[BoundArguments]] into x2 and length of that into x4.
|
// Load [[BoundArguments]] into x2 and length of that into x4.
|
||||||
Label no_bound_arguments;
|
Label no_bound_arguments;
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
bound_argv, FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
|
bound_argv, FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
|
||||||
__ SmiUntagField(bound_argc,
|
__ SmiUntagField(bound_argc,
|
||||||
FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
|
FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
|
||||||
@ -2681,8 +2677,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
|
|||||||
__ SlotAddress(copy_to, 1);
|
__ SlotAddress(copy_to, 1);
|
||||||
__ Bind(&loop);
|
__ Bind(&loop);
|
||||||
__ Sub(counter, counter, 1);
|
__ Sub(counter, counter, 1);
|
||||||
__ LoadAnyTaggedField(scratch,
|
__ LoadTaggedField(scratch,
|
||||||
MemOperand(bound_argv, kTaggedSize, PostIndex));
|
MemOperand(bound_argv, kTaggedSize, PostIndex));
|
||||||
__ Str(scratch, MemOperand(copy_to, kSystemPointerSize, PostIndex));
|
__ Str(scratch, MemOperand(copy_to, kSystemPointerSize, PostIndex));
|
||||||
__ Cbnz(counter, &loop);
|
__ Cbnz(counter, &loop);
|
||||||
}
|
}
|
||||||
@ -2703,15 +2699,15 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
|
|||||||
__ AssertBoundFunction(x1);
|
__ AssertBoundFunction(x1);
|
||||||
|
|
||||||
// Patch the receiver to [[BoundThis]].
|
// Patch the receiver to [[BoundThis]].
|
||||||
__ LoadAnyTaggedField(x10,
|
__ LoadTaggedField(x10,
|
||||||
FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
|
FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
|
||||||
__ Poke(x10, __ ReceiverOperand(x0));
|
__ Poke(x10, __ ReceiverOperand(x0));
|
||||||
|
|
||||||
// Push the [[BoundArguments]] onto the stack.
|
// Push the [[BoundArguments]] onto the stack.
|
||||||
Generate_PushBoundArguments(masm);
|
Generate_PushBoundArguments(masm);
|
||||||
|
|
||||||
// Call the [[BoundTargetFunction]] via the Call builtin.
|
// Call the [[BoundTargetFunction]] via the Call builtin.
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
|
x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
|
||||||
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
|
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
|
||||||
RelocInfo::CODE_TARGET);
|
RelocInfo::CODE_TARGET);
|
||||||
@ -2812,7 +2808,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
|
|||||||
Label call_generic_stub;
|
Label call_generic_stub;
|
||||||
|
|
||||||
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
|
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
|
x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
|
||||||
__ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
|
__ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
|
||||||
__ TestAndBranchIfAllClear(
|
__ TestAndBranchIfAllClear(
|
||||||
@ -2844,13 +2840,13 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
|
|||||||
Label done;
|
Label done;
|
||||||
__ CmpTagged(x1, x3);
|
__ CmpTagged(x1, x3);
|
||||||
__ B(ne, &done);
|
__ B(ne, &done);
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
x3, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
|
x3, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
|
||||||
__ Bind(&done);
|
__ Bind(&done);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Construct the [[BoundTargetFunction]] via the Construct builtin.
|
// Construct the [[BoundTargetFunction]] via the Construct builtin.
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
|
x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
|
||||||
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
|
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
|
||||||
}
|
}
|
||||||
@ -2874,8 +2870,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
|
|||||||
__ JumpIfSmi(target, &non_constructor);
|
__ JumpIfSmi(target, &non_constructor);
|
||||||
|
|
||||||
// Check if target has a [[Construct]] internal method.
|
// Check if target has a [[Construct]] internal method.
|
||||||
__ LoadTaggedPointerField(map,
|
__ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset));
|
||||||
FieldMemOperand(target, HeapObject::kMapOffset));
|
|
||||||
{
|
{
|
||||||
Register flags = x2;
|
Register flags = x2;
|
||||||
DCHECK(!AreAliased(argc, target, map, instance_type, flags));
|
DCHECK(!AreAliased(argc, target, map, instance_type, flags));
|
||||||
@@ -2976,12 +2971,11 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
   Register scratch = x10;
   Label allocate_vector, done;

-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       vector, FieldMemOperand(kWasmInstanceRegister,
                               WasmInstanceObject::kFeedbackVectorsOffset));
   __ Add(vector, vector, Operand(func_index, LSL, kTaggedSizeLog2));
-  __ LoadTaggedPointerField(vector,
-                            FieldMemOperand(vector, FixedArray::kHeaderSize));
+  __ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize));
   __ JumpIfSmi(vector, &allocate_vector);
   __ bind(&done);
   __ Push(vector, xzr);
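The hunk above also shows the usual element-address arithmetic: the function index is scaled by the tagged-slot size and folded into the base register, and the subsequent field load applies the array header offset minus the heap-object tag. A hedged sketch of that displacement, with assumed constant values:

    #include <cstdint>

    // Illustrative values; the real constants live in V8's headers.
    constexpr int kTaggedSizeLog2 = 2;        // 4-byte slots when compressed
    constexpr int kHeapObjectTag = 1;         // heap pointers carry tag bit 1
    constexpr int kFixedArrayHeaderSize = 8;  // map + length, both tagged

    // Total displacement from the tagged FixedArray pointer to element i,
    // i.e. what the Add(...) plus FieldMemOperand(..., kHeaderSize) compute.
    inline intptr_t FixedArrayElementDisp(int i) {
      return (static_cast<intptr_t>(i) << kTaggedSizeLog2) +
             kFixedArrayHeaderSize - kHeapObjectTag;
    }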
@@ -3121,8 +3115,8 @@ void PrepareForBuiltinCall(MacroAssembler* masm, MemOperand GCScanSlotPlace,
          MemOperand(sp, -2 * kSystemPointerSize, PreIndex));
   // We had to prepare the parameters for the Call: we have to put the context
   // into kContextRegister.
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       kContextRegister,  // cp(x27)
       MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
                                     WasmInstanceObject::kNativeContextOffset)));
 }
@@ -3210,7 +3204,7 @@ void AllocateSuspender(MacroAssembler* masm, Register function_data,
          MemOperand(fp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset));
   __ Stp(wasm_instance, function_data,
          MemOperand(sp, -2 * kSystemPointerSize, PreIndex));
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       kContextRegister,
       MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
                                     WasmInstanceObject::kNativeContextOffset)));
@@ -3256,10 +3250,9 @@ void ReloadParentContinuation(MacroAssembler* masm, Register wasm_instance,
                 wasm::JumpBuffer::Retired);
   }
   Register parent = tmp2;
-  __ LoadAnyTaggedField(
-      parent,
-      FieldMemOperand(active_continuation,
-                      WasmContinuationObject::kParentOffset));
+  __ LoadTaggedField(parent,
+                     FieldMemOperand(active_continuation,
+                                     WasmContinuationObject::kParentOffset));

   // Update active continuation root.
   int32_t active_continuation_offset =
@@ -3293,7 +3286,7 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1,
       FieldMemOperand(suspender, WasmSuspenderObject::kStateOffset);
   __ Move(tmp2, Smi::FromInt(WasmSuspenderObject::kInactive));
   __ StoreTaggedField(tmp2, state_loc);
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       suspender,
       FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
   __ CompareRoot(suspender, RootIndex::kUndefinedValue);
@@ -3322,17 +3315,16 @@ void LoadFunctionDataAndWasmInstance(MacroAssembler* masm,
                                      Register function_data,
                                      Register wasm_instance) {
   Register closure = function_data;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       function_data,
       MemOperand(
           closure,
           wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()));
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       function_data,
-      FieldMemOperand(function_data,
-                      SharedFunctionInfo::kFunctionDataOffset));
+      FieldMemOperand(function_data, SharedFunctionInfo::kFunctionDataOffset));

-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       wasm_instance,
       FieldMemOperand(function_data,
                       WasmExportedFunctionData::kInstanceOffset));
@@ -3573,7 +3565,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
     // A result of AllocateSuspender is in the return register.
     __ Str(suspender, MemOperand(fp, kSuspenderOffset));
     DEFINE_SCOPED(target_continuation);
-    __ LoadAnyTaggedField(
+    __ LoadTaggedField(
         target_continuation,
         FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
     FREE_REG(suspender);
@@ -4229,7 +4221,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
   __ Mov(scratch, 1);
   __ Str(scratch, MemOperand(thread_in_wasm_flag_addr, 0));

-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       function_entry,
       FieldMemOperand(function_data,
                       WasmExportedFunctionData::kInternalOffset));
@@ -4497,7 +4489,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
   regs.ResetExcept(promise, suspender, continuation);

   DEFINE_REG(suspender_continuation);
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       suspender_continuation,
       FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
   if (v8_flags.debug_code) {
@@ -4518,15 +4510,15 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
   // Update roots.
   // -------------------------------------------
   DEFINE_REG(caller);
-  __ LoadAnyTaggedField(caller,
+  __ LoadTaggedField(caller,
                      FieldMemOperand(suspender_continuation,
                                      WasmContinuationObject::kParentOffset));
   int32_t active_continuation_offset =
       MacroAssembler::RootRegisterOffsetForRootIndex(
           RootIndex::kActiveContinuation);
   __ Str(caller, MemOperand(kRootRegister, active_continuation_offset));
   DEFINE_REG(parent);
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       parent, FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
   int32_t active_suspender_offset =
       MacroAssembler::RootRegisterOffsetForRootIndex(
@@ -4597,7 +4589,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
   // Load suspender from closure.
   // -------------------------------------------
   DEFINE_REG(sfi);
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       sfi,
       MemOperand(
           closure,
@@ -4607,12 +4599,12 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
   // RecordWriteField calls later.
   DEFINE_PINNED(suspender, WriteBarrierDescriptor::ObjectRegister());
   DEFINE_REG(function_data);
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       function_data,
       FieldMemOperand(sfi, SharedFunctionInfo::kFunctionDataOffset));
   // The write barrier uses a fixed register for the host object (rdi). The next
   // barrier is on the suspender, so load it in rdi directly.
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       suspender,
       FieldMemOperand(function_data, WasmResumeData::kSuspenderOffset));
   // Check the suspender state.
@@ -4671,10 +4663,9 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
   FREE_REG(suspender);
   DEFINE_PINNED(target_continuation, WriteBarrierDescriptor::ObjectRegister());
   suspender = target_continuation;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       target_continuation,
-      FieldMemOperand(suspender,
-                      WasmSuspenderObject::kContinuationOffset));
+      FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
   suspender = no_reg;

   __ StoreTaggedField(
@@ -5321,12 +5312,12 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
   DCHECK(!AreAliased(receiver, holder, callback, data, undef, isolate_address,
                      name));

-  __ LoadAnyTaggedField(data,
-                        FieldMemOperand(callback, AccessorInfo::kDataOffset));
+  __ LoadTaggedField(data,
+                     FieldMemOperand(callback, AccessorInfo::kDataOffset));
   __ LoadRoot(undef, RootIndex::kUndefinedValue);
   __ Mov(isolate_address, ExternalReference::isolate_address(masm->isolate()));
-  __ LoadTaggedPointerField(
-      name, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+  __ LoadTaggedField(name,
+                     FieldMemOperand(callback, AccessorInfo::kNameOffset));

   // PropertyCallbackArguments:
   //   receiver, data, return value, return value default, isolate, holder,
@@ -5697,10 +5688,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,

   // Get the InstructionStream object from the shared function info.
   Register code_obj = x22;
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       code_obj,
       FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       code_obj,
       FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));

@@ -5732,11 +5723,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,

   // Load the feedback vector.
   Register feedback_vector = x2;
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset));

   Label install_baseline_code;
   // Check if feedback vector is valid. If not, call prepare for baseline to
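Both loads in the hunk above perform the same two-step walk: closure to FeedbackCell, then FeedbackCell to its value slot, which may still be a Smi placeholder rather than a real FeedbackVector — hence the validity check that follows. A schematic of the indirection, with an invented layout purely for illustration:

    #include <cstdint>

    using Address = uintptr_t;

    struct CellStub { Address value; };              // one tagged value slot
    struct JSFunctionStub { const CellStub* feedback_cell; };

    // closure -> FeedbackCell (JSFunction::kFeedbackCellOffset)
    // FeedbackCell -> value   (Cell::kValueOffset)
    inline Address LoadFeedbackVector(const JSFunctionStub& closure) {
      return closure.feedback_cell->value;
    }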
@@ -64,7 +64,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
   }
   __ CmpS32(scratch1, Operand(INTERPRETER_DATA_TYPE), r0);
   __ bne(&done);
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       sfi_data,
       FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset), r0);

@@ -120,10 +120,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,

   // Get the InstructionStream object from the shared function info.
   Register code_obj = r9;
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       code_obj, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset),
       r0);
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       code_obj,
       FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset), r0);

@@ -155,12 +155,11 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,

   // Load the feedback vector.
   Register feedback_vector = r5;
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
-      r0);
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
+                     r0);
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset), r0);

   Label install_baseline_code;
   // Check if feedback vector is valid. If not, call prepare for baseline to
@@ -431,7 +430,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,

   // Load deoptimization data from the code object.
   // <deopt_data> = <code>[#deoptimization_data_offset]
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r4,
       FieldMemOperand(
           r3, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset),
@@ -495,7 +494,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   //  -- sp[4*kSystemPointerSize]: context
   // -----------------------------------

-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
   __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
   __ DecodeField<SharedFunctionInfo::FunctionKindBits>(r7);
@@ -660,10 +659,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   __ AssertGeneratorObject(r4);

   // Load suspended function and context.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
-  __ LoadTaggedPointerField(cp, FieldMemOperand(r7, JSFunction::kContextOffset),
-                            r0);
+  __ LoadTaggedField(cp, FieldMemOperand(r7, JSFunction::kContextOffset), r0);

   // Flood function if we are stepping.
   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -703,12 +701,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // -----------------------------------

   // Copy the function arguments from the generator object's register file.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
   __ LoadU16(
       r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
   __ subi(r6, r6, Operand(kJSArgcReceiverSlots));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r5, FieldMemOperand(r4, JSGeneratorObject::kParametersAndRegistersOffset),
       r0);
   {
@@ -719,14 +717,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ blt(&done_loop);
     __ ShiftLeftU64(r10, r6, Operand(kTaggedSizeLog2));
     __ add(scratch, r5, r10);
-    __ LoadAnyTaggedField(
-        scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize), r0);
+    __ LoadTaggedField(scratch,
+                       FieldMemOperand(scratch, FixedArray::kHeaderSize), r0);
     __ Push(scratch);
     __ b(&loop);
     __ bind(&done_loop);

     // Push receiver.
-    __ LoadAnyTaggedField(
+    __ LoadTaggedField(
         scratch, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset), r0);
     __ Push(scratch);
   }
@@ -734,9 +732,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // Underlying function needs to have bytecode available.
   if (v8_flags.debug_code) {
     Label is_baseline;
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset), r0);
     GetSharedFunctionInfoBytecodeOrBaseline(masm, r6, ip, &is_baseline);
     __ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
@@ -746,7 +744,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {

   // Resume (Ignition/TurboFan) generator object.
   {
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         r3, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
     __ LoadU16(r3, FieldMemOperand(
                        r3, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -756,8 +754,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ mr(r6, r4);
     __ mr(r4, r7);
     static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
-    __ LoadTaggedPointerField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset),
-                              r0);
+    __ LoadTaggedField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset), r0);
     __ JumpCodeObject(r5);
   }

@@ -769,7 +766,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ PushRoot(RootIndex::kTheHoleValue);
     __ CallRuntime(Runtime::kDebugOnFunctionCall);
     __ Pop(r4);
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
   }
   __ b(&stepping_prepared);
@@ -780,7 +777,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Push(r4);
    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
     __ Pop(r4);
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
   }
   __ b(&stepping_prepared);
@@ -1212,12 +1209,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
       BaselineOutOfLinePrologueDescriptor::kClosure);
   // Load the feedback vector from the closure.
   Register feedback_vector = ip;
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
-      r0);
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
+                     r0);
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset), r0);
   __ AssertFeedbackVector(feedback_vector, r11);

   // Check for an tiering state.
@@ -1378,10 +1374,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(

   // Get the bytecode array from the function object and load it into
   // kInterpreterBytecodeArrayRegister.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r7, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset), r0);
   // Load original bytecode array or the debug copy.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       kInterpreterBytecodeArrayRegister,
       FieldMemOperand(r7, SharedFunctionInfo::kFunctionDataOffset), r0);

@@ -1397,17 +1393,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   __ bne(&compile_lazy);

   // Load the feedback vector from the closure.
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
-      r0);
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
+                     r0);
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset), r0);

   Label push_stack_frame;
   // Check if feedback vector is valid. If valid, check for optimized code
   // and update invocation count. Otherwise, setup the stack frame.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset), r0);
   __ LoadU16(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
   __ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE));
@@ -1589,17 +1584,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   __ bind(&is_baseline);
   {
     // Load the feedback vector from the closure.
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         feedback_vector,
         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
-    __ LoadTaggedPointerField(
-        feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
-        r0);
+    __ LoadTaggedField(feedback_vector,
+                       FieldMemOperand(feedback_vector, Cell::kValueOffset),
+                       r0);

     Label install_baseline_code;
     // Check if feedback vector is valid. If not, call prepare for baseline to
     // allocate it.
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         ip, FieldMemOperand(feedback_vector, HeapObject::kMapOffset), r0);
     __ LoadU16(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
     __ CmpS32(ip, Operand(FEEDBACK_VECTOR_TYPE), r0);
@@ -1773,16 +1768,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // get the custom trampoline, otherwise grab the entry address of the global
   // trampoline.
   __ LoadU64(r5, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset), r0);
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset), r0);
   __ CompareObjectType(r5, kInterpreterDispatchTableRegister,
                        kInterpreterDispatchTableRegister,
                        INTERPRETER_DATA_TYPE);
   __ bne(&builtin_trampoline);

-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r5, FieldMemOperand(r5, InterpreterData::kInterpreterTrampolineOffset),
       r0);
   __ LoadCodeEntry(r5, r5);
@@ -2240,8 +2235,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
   // Allow r5 to be a FixedArray, or a FixedDoubleArray if r7 == 0.
   Label ok, fail;
   __ AssertNotSmi(r5);
-  __ LoadTaggedPointerField(scratch,
-                            FieldMemOperand(r5, HeapObject::kMapOffset), r0);
+  __ LoadTaggedField(scratch, FieldMemOperand(r5, HeapObject::kMapOffset),
+                     r0);
   __ LoadU16(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   __ cmpi(scratch, Operand(FIXED_ARRAY_TYPE));
   __ beq(&ok);
@@ -2276,7 +2271,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
   __ mtctr(r7);
   __ bind(&loop);
-  __ LoadTaggedPointerField(scratch, MemOperand(r5, kTaggedSize), r0);
+  __ LoadTaggedField(scratch, MemOperand(r5, kTaggedSize), r0);
   __ addi(r5, r5, Operand(kTaggedSize));
   __ CompareRoot(scratch, RootIndex::kTheHoleValue);
   __ bne(&skip);
@@ -2311,8 +2306,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
   if (mode == CallOrConstructMode::kConstruct) {
     Label new_target_constructor, new_target_not_constructor;
     __ JumpIfSmi(r6, &new_target_not_constructor);
-    __ LoadTaggedPointerField(scratch,
-                              FieldMemOperand(r6, HeapObject::kMapOffset), r0);
+    __ LoadTaggedField(scratch, FieldMemOperand(r6, HeapObject::kMapOffset),
+                       r0);
     __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
     __ TestBit(scratch, Map::Bits1::IsConstructorBit::kShift, r0);
     __ bne(&new_target_constructor, cr0);
@@ -2395,14 +2390,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
   // -----------------------------------
   __ AssertCallableFunction(r4);

-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);

   // Enter the context of the function; ToObject has to run in the function
   // context, and we also need to take the global proxy from the function
   // context in case of conversion.
-  __ LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset),
-                            r0);
+  __ LoadTaggedField(cp, FieldMemOperand(r4, JSFunction::kContextOffset), r0);
   // We need to convert the receiver for non-native sloppy mode functions.
   Label done_convert;
   __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kFlagsOffset));
@@ -2456,7 +2450,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
         __ Pop(r3, r4);
         __ SmiUntag(r3);
       }
-      __ LoadTaggedPointerField(
+      __ LoadTaggedField(
           r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
       __ bind(&convert_receiver);
     }
@@ -2487,7 +2481,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {

   // Load [[BoundArguments]] into r5 and length of that into r7.
   Label no_bound_arguments;
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r5, FieldMemOperand(r4, JSBoundFunction::kBoundArgumentsOffset), r0);
   __ SmiUntag(r7, FieldMemOperand(r5, FixedArray::kLengthOffset), SetRC, r0);
   __ beq(&no_bound_arguments, cr0);
@@ -2536,7 +2530,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
       __ subi(r7, r7, Operand(1));
       __ ShiftLeftU64(scratch, r7, Operand(kTaggedSizeLog2));
       __ add(scratch, scratch, r5);
-      __ LoadAnyTaggedField(scratch, MemOperand(scratch), r0);
+      __ LoadTaggedField(scratch, MemOperand(scratch), r0);
       __ Push(scratch);
       __ bdnz(&loop);
      __ bind(&done);
@@ -2559,15 +2553,15 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
   __ AssertBoundFunction(r4);

   // Patch the receiver to [[BoundThis]].
-  __ LoadAnyTaggedField(
-      r6, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset), r0);
+  __ LoadTaggedField(r6, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset),
+                     r0);
   __ StoreReceiver(r6, r3, ip);

   // Push the [[BoundArguments]] onto the stack.
   Generate_PushBoundArguments(masm);

   // Call the [[BoundTargetFunction]] via the Call builtin.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r4, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
   __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
           RelocInfo::CODE_TARGET);
@@ -2667,7 +2661,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
   Label call_generic_stub;

   // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
   __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
   __ mov(ip, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
@@ -2699,12 +2693,12 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
     Label skip;
     __ CompareTagged(r4, r6);
     __ bne(&skip);
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         r6, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
     __ bind(&skip);

   // Construct the [[BoundTargetFunction]] via the Construct builtin.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r4, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
   __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
 }
@@ -2728,8 +2722,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
   __ JumpIfSmi(target, &non_constructor);

   // Check if target has a [[Construct]] internal method.
-  __ LoadTaggedPointerField(
-      map, FieldMemOperand(target, HeapObject::kMapOffset), r0);
+  __ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset), r0);
   {
     Register flags = r5;
     DCHECK(!AreAliased(argc, target, map, instance_type, flags));
@@ -2817,15 +2810,15 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
   Register scratch = ip;
   Label allocate_vector, done;

-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       vector,
       FieldMemOperand(kWasmInstanceRegister,
                       WasmInstanceObject::kFeedbackVectorsOffset),
       scratch);
   __ ShiftLeftU64(scratch, func_index, Operand(kTaggedSizeLog2));
   __ AddS64(vector, vector, scratch);
-  __ LoadTaggedPointerField(
-      vector, FieldMemOperand(vector, FixedArray::kHeaderSize), scratch);
+  __ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize),
+                     scratch);
   __ JumpIfSmi(vector, &allocate_vector);
   __ bind(&done);
   __ push(kWasmInstanceRegister);
@@ -3530,16 +3523,16 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {

   __ push(receiver);
   // Push data from AccessorInfo.
-  __ LoadAnyTaggedField(
-      scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset), r0);
+  __ LoadTaggedField(scratch,
+                     FieldMemOperand(callback, AccessorInfo::kDataOffset), r0);
   __ push(scratch);
   __ LoadRoot(scratch, RootIndex::kUndefinedValue);
   __ Push(scratch, scratch);
   __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
   __ Push(scratch, holder);
   __ Push(Smi::zero());  // should_throw_on_error -> false
-  __ LoadTaggedPointerField(
-      scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset), r0);
+  __ LoadTaggedField(scratch,
+                     FieldMemOperand(callback, AccessorInfo::kNameOffset), r0);
   __ push(scratch);

   // v8::PropertyCallbackInfo::args_ array and name handle.
@@ -155,7 +155,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
     {
       UseScratchRegisterScope temps(masm);
       Register func_info = temps.Acquire();
-      __ LoadTaggedPointerField(
+      __ LoadTaggedField(
           func_info, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
       __ Load32U(func_info,
                  FieldMemOperand(func_info, SharedFunctionInfo::kFlagsOffset));
@@ -353,7 +353,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,

   __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE),
             Label::Distance::kNear);
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       sfi_data,
       FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));

@@ -377,10 +377,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   __ AssertGeneratorObject(a1);

   // Load suspended function and context.
-  __ LoadTaggedPointerField(
-      a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
-  __ LoadTaggedPointerField(cp,
-                            FieldMemOperand(a4, JSFunction::kContextOffset));
+  __ LoadTaggedField(a4,
+                     FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+  __ LoadTaggedField(cp, FieldMemOperand(a4, JSFunction::kContextOffset));

   // Flood function if we are stepping.
   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -417,12 +416,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // context allocation for any variables in generators, the actual argument
   // values have already been copied into the context and these dummy values
   // will never be used.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
   __ Lhu(a3,
          FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
   __ SubWord(a3, a3, Operand(kJSArgcReceiverSlots));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       t1,
       FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
   {
@@ -431,23 +430,23 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ SubWord(a3, a3, Operand(1));
     __ Branch(&done_loop, lt, a3, Operand(zero_reg), Label::Distance::kNear);
     __ CalcScaledAddress(kScratchReg, t1, a3, kTaggedSizeLog2);
-    __ LoadAnyTaggedField(
-        kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
+    __ LoadTaggedField(kScratchReg,
+                       FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
     __ Push(kScratchReg);
     __ Branch(&loop);
     __ bind(&done_loop);
     // Push receiver.
-    __ LoadAnyTaggedField(
-        kScratchReg, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
+    __ LoadTaggedField(kScratchReg,
+                       FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
     __ Push(kScratchReg);
   }

   // Underlying function needs to have bytecode available.
   if (v8_flags.debug_code) {
     Label is_baseline;
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
     GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, a0, &is_baseline);
     __ GetObjectType(a3, a3, a3);
@@ -458,7 +457,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {

   // Resume (Ignition/TurboFan) generator object.
   {
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
     __ Lhu(a0, FieldMemOperand(
                    a0, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -468,7 +467,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Move(a3, a1);
     __ Move(a1, a4);
     static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
-    __ LoadTaggedPointerField(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+    __ LoadTaggedField(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
     __ JumpCodeObject(a2);
   }

@@ -481,8 +480,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ CallRuntime(Runtime::kDebugOnFunctionCall);
     __ Pop(a1);
   }
-  __ LoadTaggedPointerField(
-      a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+  __ LoadTaggedField(a4,
+                     FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
   __ Branch(&stepping_prepared);

   __ bind(&prepare_step_in_suspended_generator);
@@ -492,8 +491,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
     __ Pop(a1);
   }
-  __ LoadTaggedPointerField(
-      a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+  __ LoadTaggedField(a4,
+                     FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
   __ Branch(&stepping_prepared);

   __ bind(&stack_overflow);
@@ -1130,10 +1129,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   Register feedback_vector = a2;
   // Get the bytecode array from the function object and load it into
   // kInterpreterBytecodeArrayRegister.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       kScratchReg,
       FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       kInterpreterBytecodeArrayRegister,
       FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
   Label is_baseline;
@@ -1147,17 +1146,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));

   // Load the feedback vector from the closure.
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset));

   Label push_stack_frame;
   // Check if feedback vector is valid. If valid, check for optimized code
   // and update invocation count. Otherwise, setup the stack frame.
-  __ LoadTaggedPointerField(
-      a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+  __ LoadTaggedField(a4,
+                     FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
   __ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
   __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE),
             Label::Distance::kNear);
@@ -1331,16 +1329,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   __ bind(&is_baseline);
   {
     // Load the feedback vector from the closure.
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         feedback_vector,
         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-    __ LoadTaggedPointerField(
-        feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+    __ LoadTaggedField(feedback_vector,
+                       FieldMemOperand(feedback_vector, Cell::kValueOffset));

     Label install_baseline_code;
     // Check if feedback vector is valid. If not, call prepare for baseline to
     // allocate it.
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
     __ Lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
     __ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
@@ -1511,16 +1509,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // get the custom trampoline, otherwise grab the entry address of the global
   // trampoline.
   __ LoadWord(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
   __ GetObjectType(t0, kInterpreterDispatchTableRegister,
                    kInterpreterDispatchTableRegister);
   __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
             Operand(INTERPRETER_DATA_TYPE), Label::Distance::kNear);

-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
   __ LoadCodeEntry(t0, t0);
   __ BranchShort(&trampoline_loaded);
@@ -1778,7 +1776,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,

   // Load deoptimization data from the code object.
   // <deopt_data> = <code>[#deoptimization_data_offset]
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       a1,
       MemOperand(a0,
                  InstructionStream::kDeoptimizationDataOrInterpreterDataOffset -
@@ -2152,7 +2150,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
   __ SubWord(scratch, sp, Operand(scratch));
   __ LoadRoot(hole_value, RootIndex::kTheHoleValue);
   __ bind(&loop);
-  __ LoadTaggedPointerField(a5, MemOperand(src));
+  __ LoadTaggedField(a5, MemOperand(src));
   __ AddWord(src, src, kTaggedSize);
   __ Branch(&push, ne, a5, Operand(hole_value), Label::Distance::kNear);
   __ LoadRoot(a5, RootIndex::kUndefinedValue);
@@ -2190,8 +2188,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
     UseScratchRegisterScope temps(masm);
     Register scratch = temps.Acquire();
     __ JumpIfSmi(a3, &new_target_not_constructor);
-    __ LoadTaggedPointerField(scratch,
-                              FieldMemOperand(a3, HeapObject::kMapOffset));
+    __ LoadTaggedField(scratch, FieldMemOperand(a3, HeapObject::kMapOffset));
     __ Lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
     __ And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
     __ Branch(&new_target_constructor, ne, scratch, Operand(zero_reg),
@@ -2271,7 +2268,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
   __ AssertCallableFunction(a1);

   Label class_constructor;
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
   __ Load32U(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
   __ And(kScratchReg, a3,
@@ -2281,8 +2278,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
   // Enter the context of the function; ToObject has to run in the function
   // context, and we also need to take the global proxy from the function
   // context in case of conversion.
-  __ LoadTaggedPointerField(cp,
-                            FieldMemOperand(a1, JSFunction::kContextOffset));
+  __ LoadTaggedField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
   // We need to convert the receiver for non-native sloppy mode functions.
   Label done_convert;
   __ Load32U(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
@@ -2337,7 +2333,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
         __ Pop(a0, a1);
         __ SmiUntag(a0);
       }
-      __ LoadTaggedPointerField(
+      __ LoadTaggedField(
           a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
       __ bind(&convert_receiver);
     }
@ -2379,7 +2375,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
|
|||||||
Register bound_argv = a2;
|
Register bound_argv = a2;
|
||||||
// Load [[BoundArguments]] into a2 and length of that into a4.
|
// Load [[BoundArguments]] into a2 and length of that into a4.
|
||||||
Label no_bound_arguments;
|
Label no_bound_arguments;
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
bound_argv, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
|
bound_argv, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
|
||||||
__ SmiUntagField(bound_argc,
|
__ SmiUntagField(bound_argc,
|
||||||
FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
|
FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
|
||||||
@ -2423,7 +2419,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
|
|||||||
__ SubWord(a4, a4, Operand(1));
|
__ SubWord(a4, a4, Operand(1));
|
||||||
__ Branch(&done_loop, lt, a4, Operand(zero_reg), Label::Distance::kNear);
|
__ Branch(&done_loop, lt, a4, Operand(zero_reg), Label::Distance::kNear);
|
||||||
__ CalcScaledAddress(a5, a2, a4, kTaggedSizeLog2);
|
__ CalcScaledAddress(a5, a2, a4, kTaggedSizeLog2);
|
||||||
__ LoadAnyTaggedField(kScratchReg, MemOperand(a5));
|
__ LoadTaggedField(kScratchReg, MemOperand(a5));
|
||||||
__ Push(kScratchReg);
|
__ Push(kScratchReg);
|
||||||
__ Branch(&loop);
|
__ Branch(&loop);
|
||||||
__ bind(&done_loop);
|
__ bind(&done_loop);
|
||||||
@ -2449,8 +2445,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
|
|||||||
{
|
{
|
||||||
UseScratchRegisterScope temps(masm);
|
UseScratchRegisterScope temps(masm);
|
||||||
Register scratch = temps.Acquire();
|
Register scratch = temps.Acquire();
|
||||||
__ LoadAnyTaggedField(
|
__ LoadTaggedField(scratch,
|
||||||
scratch, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
|
FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
|
||||||
__ StoreReceiver(scratch, a0, kScratchReg);
|
__ StoreReceiver(scratch, a0, kScratchReg);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2458,7 +2454,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
|
|||||||
Generate_PushBoundArguments(masm);
|
Generate_PushBoundArguments(masm);
|
||||||
|
|
||||||
// Call the [[BoundTargetFunction]] via the Call builtin.
|
// Call the [[BoundTargetFunction]] via the Call builtin.
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
|
a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
|
||||||
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
|
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
|
||||||
RelocInfo::CODE_TARGET);
|
RelocInfo::CODE_TARGET);
|
||||||
@ -2548,7 +2544,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
|
|||||||
Label call_generic_stub;
|
Label call_generic_stub;
|
||||||
|
|
||||||
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
|
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
||||||
__ Load32U(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
|
__ Load32U(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
|
||||||
__ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
|
__ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
|
||||||
@ -2587,12 +2583,12 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
|
|||||||
__ Branch(&skip, ne, a1, Operand(a3), Label::Distance::kNear);
|
__ Branch(&skip, ne, a1, Operand(a3), Label::Distance::kNear);
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
|
a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
|
||||||
__ bind(&skip);
|
__ bind(&skip);
|
||||||
|
|
||||||
// Construct the [[BoundTargetFunction]] via the Construct builtin.
|
// Construct the [[BoundTargetFunction]] via the Construct builtin.
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
|
a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
|
||||||
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
|
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
|
||||||
}
|
}
|
||||||
@ -2615,7 +2611,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
|
|||||||
temps.Include(t0, t1);
|
temps.Include(t0, t1);
|
||||||
Register map = temps.Acquire();
|
Register map = temps.Acquire();
|
||||||
Register scratch = temps.Acquire();
|
Register scratch = temps.Acquire();
|
||||||
__ LoadTaggedPointerField(map, FieldMemOperand(a1, HeapObject::kMapOffset));
|
__ LoadTaggedField(map, FieldMemOperand(a1, HeapObject::kMapOffset));
|
||||||
__ Lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
|
__ Lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
|
||||||
__ And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
|
__ And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
|
||||||
__ Branch(&non_constructor, eq, scratch, Operand(zero_reg));
|
__ Branch(&non_constructor, eq, scratch, Operand(zero_reg));
|
||||||
@ -3366,8 +3362,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
|
|||||||
__ SubWord(sp, sp, (PCA::kArgsLength + 1) * kSystemPointerSize);
|
__ SubWord(sp, sp, (PCA::kArgsLength + 1) * kSystemPointerSize);
|
||||||
__ StoreWord(receiver,
|
__ StoreWord(receiver,
|
||||||
MemOperand(sp, (PCA::kThisIndex + 1) * kSystemPointerSize));
|
MemOperand(sp, (PCA::kThisIndex + 1) * kSystemPointerSize));
|
||||||
__ LoadAnyTaggedField(scratch,
|
__ LoadTaggedField(scratch,
|
||||||
FieldMemOperand(callback, AccessorInfo::kDataOffset));
|
FieldMemOperand(callback, AccessorInfo::kDataOffset));
|
||||||
__ StoreWord(scratch,
|
__ StoreWord(scratch,
|
||||||
MemOperand(sp, (PCA::kDataIndex + 1) * kSystemPointerSize));
|
MemOperand(sp, (PCA::kDataIndex + 1) * kSystemPointerSize));
|
||||||
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
|
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
|
||||||
@ -3385,8 +3381,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
|
|||||||
DCHECK_EQ(0, Smi::zero().ptr());
|
DCHECK_EQ(0, Smi::zero().ptr());
|
||||||
__ StoreWord(zero_reg, MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) *
|
__ StoreWord(zero_reg, MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) *
|
||||||
kSystemPointerSize));
|
kSystemPointerSize));
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(scratch,
|
||||||
scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
|
FieldMemOperand(callback, AccessorInfo::kNameOffset));
|
||||||
__ StoreWord(scratch, MemOperand(sp, 0 * kSystemPointerSize));
|
__ StoreWord(scratch, MemOperand(sp, 0 * kSystemPointerSize));
|
||||||
|
|
||||||
// v8::PropertyCallbackInfo::args_ array and name handle.
|
// v8::PropertyCallbackInfo::args_ array and name handle.
|
||||||
@ -3677,10 +3673,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
|
|||||||
|
|
||||||
// Get the InstructionStream object from the shared function info.
|
// Get the InstructionStream object from the shared function info.
|
||||||
Register code_obj = s1;
|
Register code_obj = s1;
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
code_obj,
|
code_obj,
|
||||||
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
|
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
code_obj,
|
code_obj,
|
||||||
FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
|
FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
|
||||||
|
|
||||||
@ -3719,11 +3715,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
|
|||||||
|
|
||||||
// Replace BytecodeOffset with the feedback vector.
|
// Replace BytecodeOffset with the feedback vector.
|
||||||
Register feedback_vector = a2;
|
Register feedback_vector = a2;
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(feedback_vector,
|
||||||
feedback_vector,
|
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
|
||||||
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
|
__ LoadTaggedField(feedback_vector,
|
||||||
__ LoadTaggedPointerField(
|
FieldMemOperand(feedback_vector, Cell::kValueOffset));
|
||||||
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
|
|
||||||
Label install_baseline_code;
|
Label install_baseline_code;
|
||||||
// Check if feedback vector is valid. If not, call prepare for baseline to
|
// Check if feedback vector is valid. If not, call prepare for baseline to
|
||||||
// allocate it.
|
// allocate it.
|
||||||
|
@@ -65,7 +65,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
   }
   __ CmpS32(scratch1, Operand(INTERPRETER_DATA_TYPE));
   __ bne(&done);
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       sfi_data,
       FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
 
@@ -120,10 +120,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
 
   // Get the InstructionStream object from the shared function info.
   Register code_obj = r8;
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      code_obj,
      FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      code_obj,
      FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
 
@@ -155,11 +155,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
 
   // Load the feedback vector.
   Register feedback_vector = r4;
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset));
 
   Label install_baseline_code;
   // Check if feedback vector is valid. If not, call prepare for baseline to
@@ -320,7 +319,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
 
   // Load deoptimization data from the code object.
   // <deopt_data> = <code>[#deoptimization_data_offset]
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r3,
       FieldMemOperand(
          r2, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));
@@ -472,7 +471,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   //  -- sp[4*kSystemPointerSize]: context
   // -----------------------------------
 
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
   __ LoadU32(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
   __ DecodeField<SharedFunctionInfo::FunctionKindBits>(r6);
@@ -633,10 +632,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   __ AssertGeneratorObject(r3);
 
   // Load suspended function and context.
-  __ LoadTaggedPointerField(
-      r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
-  __ LoadTaggedPointerField(cp,
-                            FieldMemOperand(r6, JSFunction::kContextOffset));
+  __ LoadTaggedField(r6,
+                     FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+  __ LoadTaggedField(cp, FieldMemOperand(r6, JSFunction::kContextOffset));
 
   // Flood function if we are stepping.
   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -677,12 +675,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // -----------------------------------
 
   // Copy the function arguments from the generator object's register file.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
   __ LoadU16(
       r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
   __ SubS64(r5, r5, Operand(kJSArgcReceiverSlots));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r4,
       FieldMemOperand(r3, JSGeneratorObject::kParametersAndRegistersOffset));
   {
@@ -692,24 +690,24 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ blt(&done_loop);
     __ ShiftLeftU64(r1, r5, Operand(kTaggedSizeLog2));
     __ la(scratch, MemOperand(r4, r1));
-    __ LoadAnyTaggedField(scratch,
-                          FieldMemOperand(scratch, FixedArray::kHeaderSize));
+    __ LoadTaggedField(scratch,
+                       FieldMemOperand(scratch, FixedArray::kHeaderSize));
     __ Push(scratch);
     __ b(&loop);
     __ bind(&done_loop);
 
     // Push receiver.
-    __ LoadAnyTaggedField(
-        scratch, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
+    __ LoadTaggedField(scratch,
+                       FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
     __ Push(scratch);
   }
 
   // Underlying function needs to have bytecode available.
   if (v8_flags.debug_code) {
     Label is_baseline;
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
     GetSharedFunctionInfoBytecodeOrBaseline(masm, r5, ip, &is_baseline);
     __ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
@@ -719,7 +717,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
 
   // Resume (Ignition/TurboFan) generator object.
   {
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         r2, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
     __ LoadS16(
         r2,
@@ -730,7 +728,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ mov(r5, r3);
     __ mov(r3, r6);
     static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
-    __ LoadTaggedPointerField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+    __ LoadTaggedField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
     __ JumpCodeObject(r4);
   }
 
@@ -742,8 +740,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ PushRoot(RootIndex::kTheHoleValue);
     __ CallRuntime(Runtime::kDebugOnFunctionCall);
    __ Pop(r3);
-    __ LoadTaggedPointerField(
-        r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+    __ LoadTaggedField(r6,
+                       FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
   }
   __ b(&stepping_prepared);
 
@@ -753,8 +751,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Push(r3);
     __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
     __ Pop(r3);
-    __ LoadTaggedPointerField(
-        r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+    __ LoadTaggedField(r6,
+                       FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
   }
   __ b(&stepping_prepared);
 
@@ -1245,11 +1243,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
       BaselineOutOfLinePrologueDescriptor::kClosure);
   // Load the feedback vector from the closure.
   Register feedback_vector = ip;
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset));
   __ AssertFeedbackVector(feedback_vector, r1);
 
   // Check for an tiering state.
@@ -1406,10 +1403,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(
 
   // Get the bytecode array from the function object and load it into
   // kInterpreterBytecodeArrayRegister.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r6, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
   // Load original bytecode array or the debug copy.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      kInterpreterBytecodeArrayRegister,
      FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
 
@@ -1425,17 +1422,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   __ bne(&compile_lazy);
 
   // Load the feedback vector from the closure.
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset));
 
   Label push_stack_frame;
   // Check if feedback vector is valid. If valid, check for optimized code
   // and update invocation count. Otherwise, setup the stack frame.
-  __ LoadTaggedPointerField(
-      r6, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+  __ LoadTaggedField(r6,
+                     FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
   __ LoadU16(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
   __ CmpS64(r6, Operand(FEEDBACK_VECTOR_TYPE));
   __ bne(&push_stack_frame);
@@ -1611,16 +1607,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   __ bind(&is_baseline);
   {
     // Load the feedback vector from the closure.
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         feedback_vector,
         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-    __ LoadTaggedPointerField(
-        feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+    __ LoadTaggedField(feedback_vector,
+                       FieldMemOperand(feedback_vector, Cell::kValueOffset));
 
     Label install_baseline_code;
     // Check if feedback vector is valid. If not, call prepare for baseline to
     // allocate it.
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         ip, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
     __ LoadU16(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
     __ CmpS32(ip, Operand(FEEDBACK_VECTOR_TYPE));
@@ -1792,16 +1788,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // get the custom trampoline, otherwise grab the entry address of the global
   // trampoline.
   __ LoadU64(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      r4, FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
   __ CompareObjectType(r4, kInterpreterDispatchTableRegister,
                        kInterpreterDispatchTableRegister,
                        INTERPRETER_DATA_TYPE);
   __ bne(&builtin_trampoline);
 
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      r4, FieldMemOperand(r4, InterpreterData::kInterpreterTrampolineOffset));
   __ LoadCodeEntry(r4, r4);
   __ b(&trampoline_loaded);
@@ -2240,8 +2236,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
   // Allow r4 to be a FixedArray, or a FixedDoubleArray if r6 == 0.
   Label ok, fail;
   __ AssertNotSmi(r4);
-  __ LoadTaggedPointerField(scratch,
-                            FieldMemOperand(r4, HeapObject::kMapOffset));
+  __ LoadTaggedField(scratch, FieldMemOperand(r4, HeapObject::kMapOffset));
   __ LoadS16(scratch,
              FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   __ CmpS64(scratch, Operand(FIXED_ARRAY_TYPE));
@@ -2277,7 +2272,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
   __ mov(r1, r6);
   __ bind(&loop);
-  __ LoadAnyTaggedField(scratch, MemOperand(r4, kTaggedSize), r0);
+  __ LoadTaggedField(scratch, MemOperand(r4, kTaggedSize), r0);
   __ la(r4, MemOperand(r4, kTaggedSize));
   __ CompareRoot(scratch, RootIndex::kTheHoleValue);
   __ bne(&skip, Label::kNear);
@@ -2312,8 +2307,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
   if (mode == CallOrConstructMode::kConstruct) {
     Label new_target_constructor, new_target_not_constructor;
     __ JumpIfSmi(r5, &new_target_not_constructor);
-    __ LoadTaggedPointerField(scratch,
-                              FieldMemOperand(r5, HeapObject::kMapOffset));
+    __ LoadTaggedField(scratch, FieldMemOperand(r5, HeapObject::kMapOffset));
     __ LoadU8(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
     __ tmll(scratch, Operand(Map::Bits1::IsConstructorBit::kShift));
     __ bne(&new_target_constructor);
@@ -2397,14 +2391,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
   // -----------------------------------
   __ AssertCallableFunction(r3);
 
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
 
   // Enter the context of the function; ToObject has to run in the function
   // context, and we also need to take the global proxy from the function
   // context in case of conversion.
-  __ LoadTaggedPointerField(cp,
-                            FieldMemOperand(r3, JSFunction::kContextOffset));
+  __ LoadTaggedField(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
   // We need to convert the receiver for non-native sloppy mode functions.
   Label done_convert;
   __ LoadU32(r5, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
@@ -2458,7 +2451,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
       __ Pop(r2, r3);
       __ SmiUntag(r2);
     }
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
     __ bind(&convert_receiver);
   }
@@ -2489,7 +2482,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
 
   // Load [[BoundArguments]] into r4 and length of that into r6.
   Label no_bound_arguments;
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      r4, FieldMemOperand(r3, JSBoundFunction::kBoundArgumentsOffset));
   __ SmiUntagField(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
   __ LoadAndTestP(r6, r6);
@@ -2535,7 +2528,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
     __ bind(&loop);
     __ SubS64(r1, r6, Operand(1));
     __ ShiftLeftU64(r1, r1, Operand(kTaggedSizeLog2));
-    __ LoadAnyTaggedField(scratch, MemOperand(r4, r1), r0);
+    __ LoadTaggedField(scratch, MemOperand(r4, r1), r0);
     __ Push(scratch);
     __ SubS64(r6, r6, Operand(1));
     __ bgt(&loop);
@@ -2559,15 +2552,15 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
   __ AssertBoundFunction(r3);
 
   // Patch the receiver to [[BoundThis]].
-  __ LoadAnyTaggedField(r5,
-                        FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
+  __ LoadTaggedField(r5,
+                     FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
   __ StoreReceiver(r5, r2, r1);
 
   // Push the [[BoundArguments]] onto the stack.
   Generate_PushBoundArguments(masm);
 
   // Call the [[BoundTargetFunction]] via the Call builtin.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      r3, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
   __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
           RelocInfo::CODE_TARGET);
@@ -2667,7 +2660,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
   Label call_generic_stub;
 
   // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
   __ LoadU32(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
   __ AndP(r6, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
@@ -2698,12 +2691,12 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
   Label skip;
   __ CompareTagged(r3, r5);
   __ bne(&skip);
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      r5, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
   __ bind(&skip);
 
   // Construct the [[BoundTargetFunction]] via the Construct builtin.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      r3, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
   __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
 }
@@ -2727,8 +2720,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
   __ JumpIfSmi(target, &non_constructor);
 
   // Check if target has a [[Construct]] internal method.
-  __ LoadTaggedPointerField(map,
-                            FieldMemOperand(target, HeapObject::kMapOffset));
+  __ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset));
   {
     Register flags = r4;
     DCHECK(!AreAliased(argc, target, map, instance_type, flags));
@@ -2811,13 +2803,12 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
   Register scratch = r0;
   Label allocate_vector, done;
 
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      vector, FieldMemOperand(kWasmInstanceRegister,
                              WasmInstanceObject::kFeedbackVectorsOffset));
   __ ShiftLeftU64(scratch, func_index, Operand(kTaggedSizeLog2));
   __ AddS64(vector, vector, scratch);
-  __ LoadTaggedPointerField(vector,
-                            FieldMemOperand(vector, FixedArray::kHeaderSize));
+  __ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize));
   __ JumpIfSmi(vector, &allocate_vector);
   __ bind(&done);
   __ push(kWasmInstanceRegister);
@@ -3504,16 +3495,16 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
 
   __ push(receiver);
   // Push data from AccessorInfo.
-  __ LoadAnyTaggedField(
-      scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset), r1);
+  __ LoadTaggedField(scratch,
+                     FieldMemOperand(callback, AccessorInfo::kDataOffset), r1);
   __ push(scratch);
   __ LoadRoot(scratch, RootIndex::kUndefinedValue);
   __ Push(scratch, scratch);
   __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
   __ Push(scratch, holder);
   __ Push(Smi::zero());  // should_throw_on_error -> false
-  __ LoadTaggedPointerField(
-      scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset), r1);
+  __ LoadTaggedField(scratch,
+                     FieldMemOperand(callback, AccessorInfo::kNameOffset), r1);
   __ push(scratch);
 
   // v8::PropertyCallbackInfo::args_ array and name handle.
@ -171,9 +171,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
|
|||||||
// -----------------------------------
|
// -----------------------------------
|
||||||
|
|
||||||
const TaggedRegister shared_function_info(rbx);
|
const TaggedRegister shared_function_info(rbx);
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(shared_function_info,
|
||||||
shared_function_info,
|
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
|
||||||
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
|
|
||||||
__ movl(rbx,
|
__ movl(rbx,
|
||||||
FieldOperand(shared_function_info, SharedFunctionInfo::kFlagsOffset));
|
FieldOperand(shared_function_info, SharedFunctionInfo::kFlagsOffset));
|
||||||
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(rbx);
|
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(rbx);
|
||||||
@ -701,7 +700,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
|
|||||||
__ CmpInstanceType(scratch1, INTERPRETER_DATA_TYPE);
|
__ CmpInstanceType(scratch1, INTERPRETER_DATA_TYPE);
|
||||||
__ j(not_equal, &done, Label::kNear);
|
__ j(not_equal, &done, Label::kNear);
|
||||||
|
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
sfi_data, FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
|
sfi_data, FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
|
||||||
|
|
||||||
__ bind(&done);
|
__ bind(&done);
|
||||||
@ -729,9 +728,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
|
|||||||
Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r8 : no_reg;
|
Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r8 : no_reg;
|
||||||
|
|
||||||
// Load suspended function and context.
|
// Load suspended function and context.
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(rdi,
|
||||||
rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
|
FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
|
||||||
__ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
|
__ LoadTaggedField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
|
||||||
|
|
||||||
// Flood function if we are stepping.
|
// Flood function if we are stepping.
|
||||||
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
|
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
|
||||||
@ -768,12 +767,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
|
|||||||
// -----------------------------------
|
// -----------------------------------
|
||||||
|
|
||||||
// Copy the function arguments from the generator object's register file.
|
// Copy the function arguments from the generator object's register file.
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(rcx,
|
||||||
rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
|
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
|
||||||
__ movzxwq(
|
__ movzxwq(
|
||||||
rcx, FieldOperand(rcx, SharedFunctionInfo::kFormalParameterCountOffset));
|
rcx, FieldOperand(rcx, SharedFunctionInfo::kFormalParameterCountOffset));
|
||||||
__ decq(rcx); // Exclude receiver.
|
__ decq(rcx); // Exclude receiver.
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset));
|
rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset));
|
||||||
|
|
||||||
{
|
{
|
||||||
@ -781,24 +780,23 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
|
|||||||
__ bind(&loop);
|
__ bind(&loop);
|
||||||
__ decq(rcx);
|
__ decq(rcx);
|
||||||
__ j(less, &done_loop, Label::kNear);
|
__ j(less, &done_loop, Label::kNear);
|
||||||
__ PushTaggedAnyField(
|
__ PushTaggedField(
|
||||||
FieldOperand(rbx, rcx, times_tagged_size, FixedArray::kHeaderSize),
|
FieldOperand(rbx, rcx, times_tagged_size, FixedArray::kHeaderSize),
|
||||||
decompr_scratch1);
|
decompr_scratch1);
|
||||||
__ jmp(&loop);
|
__ jmp(&loop);
|
||||||
__ bind(&done_loop);
|
__ bind(&done_loop);
|
||||||
|
|
||||||
// Push the receiver.
|
// Push the receiver.
|
||||||
__ PushTaggedPointerField(
|
__ PushTaggedField(FieldOperand(rdx, JSGeneratorObject::kReceiverOffset),
|
||||||
FieldOperand(rdx, JSGeneratorObject::kReceiverOffset),
|
decompr_scratch1);
|
||||||
decompr_scratch1);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Underlying function needs to have bytecode available.
|
// Underlying function needs to have bytecode available.
|
||||||
if (v8_flags.debug_code) {
|
if (v8_flags.debug_code) {
|
||||||
Label is_baseline, ok;
|
Label is_baseline, ok;
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
|
rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
|
rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
|
||||||
GetSharedFunctionInfoBytecodeOrBaseline(masm, rcx, kScratchRegister,
|
GetSharedFunctionInfoBytecodeOrBaseline(masm, rcx, kScratchRegister,
|
||||||
&is_baseline);
|
&is_baseline);
|
||||||
@ -816,7 +814,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
|
|||||||
// Resume (Ignition/TurboFan) generator object.
|
// Resume (Ignition/TurboFan) generator object.
|
||||||
{
|
{
|
||||||
__ PushReturnAddressFrom(rax);
|
__ PushReturnAddressFrom(rax);
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
|
rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
|
||||||
__ movzxwq(rax, FieldOperand(
|
__ movzxwq(rax, FieldOperand(
|
||||||
rax, SharedFunctionInfo::kFormalParameterCountOffset));
|
rax, SharedFunctionInfo::kFormalParameterCountOffset));
|
||||||
@ -824,7 +822,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
|
|||||||
// pass in the generator object. In ordinary calls, new.target is always
|
// pass in the generator object. In ordinary calls, new.target is always
|
||||||
// undefined because generator functions are non-constructable.
|
// undefined because generator functions are non-constructable.
|
||||||
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
|
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
|
||||||
__ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
|
__ LoadTaggedField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
|
||||||
__ JumpCodeObject(rcx);
|
__ JumpCodeObject(rcx);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -837,8 +835,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
|
|||||||
__ PushRoot(RootIndex::kTheHoleValue);
|
__ PushRoot(RootIndex::kTheHoleValue);
|
||||||
__ CallRuntime(Runtime::kDebugOnFunctionCall);
|
__ CallRuntime(Runtime::kDebugOnFunctionCall);
|
||||||
__ Pop(rdx);
|
__ Pop(rdx);
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(rdi,
|
||||||
rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
|
FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
|
||||||
}
|
}
|
||||||
__ jmp(&stepping_prepared);
|
__ jmp(&stepping_prepared);
|
||||||
|
|
||||||
@ -848,8 +846,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
|
|||||||
__ Push(rdx);
|
__ Push(rdx);
|
||||||
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
|
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
|
||||||
__ Pop(rdx);
|
__ Pop(rdx);
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(rdi,
|
||||||
rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
|
FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
|
||||||
}
|
}
|
||||||
__ jmp(&stepping_prepared);
|
__ jmp(&stepping_prepared);
|
||||||
|
|
||||||
@ -1019,13 +1017,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(
|
|||||||
// Get the bytecode array from the function object and load it into
|
// Get the bytecode array from the function object and load it into
|
||||||
// kInterpreterBytecodeArrayRegister.
|
// kInterpreterBytecodeArrayRegister.
|
||||||
const TaggedRegister shared_function_info(kScratchRegister);
|
const TaggedRegister shared_function_info(kScratchRegister);
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
shared_function_info,
|
shared_function_info,
|
||||||
FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
|
FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(kInterpreterBytecodeArrayRegister,
|
||||||
kInterpreterBytecodeArrayRegister,
|
FieldOperand(shared_function_info,
|
||||||
FieldOperand(shared_function_info,
|
SharedFunctionInfo::kFunctionDataOffset));
|
||||||
SharedFunctionInfo::kFunctionDataOffset));
|
|
||||||
|
|
||||||
Label is_baseline;
|
Label is_baseline;
|
||||||
GetSharedFunctionInfoBytecodeOrBaseline(
|
GetSharedFunctionInfoBytecodeOrBaseline(
|
||||||
@ -1040,10 +1037,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(
|
|||||||
|
|
||||||
// Load the feedback vector from the closure.
|
// Load the feedback vector from the closure.
|
||||||
TaggedRegister feedback_cell(feedback_vector);
|
TaggedRegister feedback_cell(feedback_vector);
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(feedback_cell,
|
||||||
feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
|
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
|
||||||
__ LoadTaggedPointerField(feedback_vector,
|
__ LoadTaggedField(feedback_vector,
|
||||||
FieldOperand(feedback_cell, Cell::kValueOffset));
|
FieldOperand(feedback_cell, Cell::kValueOffset));
|
||||||
|
|
||||||
Label push_stack_frame;
|
Label push_stack_frame;
|
||||||
// Check if feedback vector is valid. If valid, check for optimized code
|
// Check if feedback vector is valid. If valid, check for optimized code
|
||||||
@ -1220,10 +1217,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(
|
|||||||
{
|
{
|
||||||
// Load the feedback vector from the closure.
|
// Load the feedback vector from the closure.
|
||||||
TaggedRegister feedback_cell(feedback_vector);
|
TaggedRegister feedback_cell(feedback_vector);
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(feedback_cell,
|
||||||
feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
|
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
|
||||||
__ LoadTaggedPointerField(feedback_vector,
|
__ LoadTaggedField(feedback_vector,
|
||||||
FieldOperand(feedback_cell, Cell::kValueOffset));
|
FieldOperand(feedback_cell, Cell::kValueOffset));
|
||||||
|
|
||||||
Label install_baseline_code;
|
Label install_baseline_code;
|
||||||
// Check if feedback vector is valid. If not, call prepare for baseline to
|
// Check if feedback vector is valid. If not, call prepare for baseline to
|
||||||
@ -1417,16 +1414,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
|
|||||||
// trampoline.
|
// trampoline.
|
||||||
__ movq(rbx, Operand(rbp, StandardFrameConstants::kFunctionOffset));
|
__ movq(rbx, Operand(rbp, StandardFrameConstants::kFunctionOffset));
|
||||||
const TaggedRegister shared_function_info(rbx);
|
const TaggedRegister shared_function_info(rbx);
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(shared_function_info,
|
||||||
shared_function_info,
|
FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
|
||||||
FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
|
__ LoadTaggedField(rbx,
|
||||||
__ LoadTaggedPointerField(
|
FieldOperand(shared_function_info,
|
||||||
rbx, FieldOperand(shared_function_info,
|
SharedFunctionInfo::kFunctionDataOffset));
|
||||||
SharedFunctionInfo::kFunctionDataOffset));
|
|
||||||
__ CmpObjectType(rbx, INTERPRETER_DATA_TYPE, kScratchRegister);
|
__ CmpObjectType(rbx, INTERPRETER_DATA_TYPE, kScratchRegister);
|
||||||
__ j(not_equal, &builtin_trampoline, Label::kNear);
|
__ j(not_equal, &builtin_trampoline, Label::kNear);
|
||||||
|
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
rbx, FieldOperand(rbx, InterpreterData::kInterpreterTrampolineOffset));
|
rbx, FieldOperand(rbx, InterpreterData::kInterpreterTrampolineOffset));
|
||||||
__ LoadCodeEntry(rbx, rbx);
|
__ LoadCodeEntry(rbx, rbx);
|
||||||
   __ jmp(&trampoline_loaded, Label::kNear);
@@ -1555,10 +1551,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
                                         BaselineOutOfLinePrologueDescriptor::kClosure);
   // Load the feedback vector from the closure.
   TaggedRegister feedback_cell(feedback_vector);
-  __ LoadTaggedPointerField(
-      feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(feedback_vector,
+  __ LoadTaggedField(feedback_cell,
+                     FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(feedback_vector,
                      FieldOperand(feedback_cell, Cell::kValueOffset));
   __ AssertFeedbackVector(feedback_vector);

   // Check the tiering state.
@@ -2097,8 +2093,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
     __ cmpl(current, num);
     __ j(equal, &done, Label::kNear);
     // Turn the hole into undefined as we go.
-    __ LoadAnyTaggedField(value, FieldOperand(src, current, times_tagged_size,
+    __ LoadTaggedField(value, FieldOperand(src, current, times_tagged_size,
                                               FixedArray::kHeaderSize));
     __ CompareRoot(value, RootIndex::kTheHoleValue);
     __ j(not_equal, &push, Label::kNear);
     __ LoadRoot(value, RootIndex::kUndefinedValue);
@@ -2213,8 +2209,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
   StackArgumentsAccessor args(rax);
   __ AssertCallableFunction(rdi);

-  __ LoadTaggedPointerField(
-      rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadTaggedField(rdx,
+                     FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
   // ----------- S t a t e -------------
   //  -- rax : the number of arguments
   //  -- rdx : the shared function info.
@@ -2224,7 +2220,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
   // Enter the context of the function; ToObject has to run in the function
   // context, and we also need to take the global proxy from the function
   // context in case of conversion.
-  __ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+  __ LoadTaggedField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
   // We need to convert the receiver for non-native sloppy mode functions.
   Label done_convert;
   __ testl(FieldOperand(rdx, SharedFunctionInfo::kFlagsOffset),
@@ -2281,7 +2277,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
       __ Pop(rax);
       __ SmiUntagUnsigned(rax);
     }
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
     __ bind(&convert_receiver);
   }
@@ -2312,8 +2308,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {

   // Load [[BoundArguments]] into rcx and length of that into rbx.
   Label no_bound_arguments;
-  __ LoadTaggedPointerField(
-      rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
+  __ LoadTaggedField(rcx,
+                     FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
   __ SmiUntagFieldUnsigned(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
   __ testl(rbx, rbx);
   __ j(zero, &no_bound_arguments);
@@ -2354,7 +2350,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
     // Push [[BoundArguments]] to the stack.
     {
       Label loop;
-      __ LoadTaggedPointerField(
+      __ LoadTaggedField(
           rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
       __ SmiUntagFieldUnsigned(rbx,
                                FieldOperand(rcx, FixedArray::kLengthOffset));
@@ -2364,9 +2360,9 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
       // offset in order to be able to move decl(rbx) right before the loop
       // condition. This is necessary in order to avoid flags corruption by
      // pointer decompression code.
-      __ LoadAnyTaggedField(
-          r12, FieldOperand(rcx, rbx, times_tagged_size,
+      __ LoadTaggedField(r12,
+                         FieldOperand(rcx, rbx, times_tagged_size,
                                       FixedArray::kHeaderSize - kTaggedSize));
       __ Push(r12);
       __ decl(rbx);
       __ j(greater, &loop);
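The "flags corruption" comment above is worth unpacking: with pointer compression enabled, every tagged load ends in an add of the cage base register, and on x64 that add rewrites EFLAGS. A minimal sketch of the constraint, with illustrative names (the shape of the lowering is inferred from the Decompress* bodies later in this diff, not copied from the x64 macro assembler):

    #include <cstdint>

    // Sketch: the decompression step after a 32-bit tagged load is a plain
    // add of the cage base. At the machine level (addq on x64) this clobbers
    // EFLAGS, so flag-producing instructions such as the decl(rbx) above must
    // be emitted after the load and immediately before the jump that consumes
    // the flags.
    inline uintptr_t DecompressAfterLoad(uintptr_t cage_base,
                                         uint32_t compressed) {
      return cage_base + compressed;  // lowers to an add, so flags clobbered
    }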
@@ -2391,15 +2387,14 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {

   // Patch the receiver to [[BoundThis]].
   StackArgumentsAccessor args(rax);
-  __ LoadAnyTaggedField(rbx,
-                        FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
+  __ LoadTaggedField(rbx, FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
   __ movq(args.GetReceiverOperand(), rbx);

   // Push the [[BoundArguments]] onto the stack.
   Generate_PushBoundArguments(masm);

   // Call the [[BoundTargetFunction]] via the Call builtin.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
   __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
           RelocInfo::CODE_TARGET);
@@ -2498,9 +2493,8 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {

   // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
   const TaggedRegister shared_function_info(rcx);
-  __ LoadTaggedPointerField(
-      shared_function_info,
-      FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadTaggedField(shared_function_info,
+                     FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
   __ testl(FieldOperand(shared_function_info, SharedFunctionInfo::kFlagsOffset),
            Immediate(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
   __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
@@ -2528,13 +2522,13 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
     Label done;
     __ cmpq(rdi, rdx);
     __ j(not_equal, &done, Label::kNear);
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         rdx, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
     __ bind(&done);
   }

   // Construct the [[BoundTargetFunction]] via the Construct builtin.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
   __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
 }
@@ -2677,7 +2671,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,

   // Load deoptimization data from the code object.
   const TaggedRegister deopt_data(rbx);
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       deopt_data,
       FieldOperand(
           rax, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));
@@ -2776,12 +2770,11 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
   __ Push(rbp);
   __ Move(rbp, rsp);
   __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::WASM)));
-  __ LoadTaggedPointerField(
-      vector, FieldOperand(kWasmInstanceRegister,
+  __ LoadTaggedField(vector,
+                     FieldOperand(kWasmInstanceRegister,
                             WasmInstanceObject::kFeedbackVectorsOffset));
-  __ LoadTaggedPointerField(vector,
-                            FieldOperand(vector, func_index, times_tagged_size,
-                                         FixedArray::kHeaderSize));
+  __ LoadTaggedField(vector, FieldOperand(vector, func_index, times_tagged_size,
+                                          FixedArray::kHeaderSize));
   Label allocate_vector, done;
   __ JumpIfSmi(vector, &allocate_vector);
   __ bind(&done);
@@ -2931,7 +2924,7 @@ void PrepareForBuiltinCall(MacroAssembler* masm, MemOperand GCScanSlotPlace,
   __ pushq(function_data);
   // We had to prepare the parameters for the Call: we have to put the context
   // into rsi.
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       rsi,
       MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
                                     WasmInstanceObject::kNativeContextOffset)));
@@ -3012,7 +3005,7 @@ void AllocateSuspender(MacroAssembler* masm, Register function_data,
   __ Move(GCScanSlotPlace, 2);
   __ Push(wasm_instance);
   __ Push(function_data);
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       kContextRegister,
       MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
                                     WasmInstanceObject::kNativeContextOffset)));
@@ -3052,7 +3045,7 @@ void ReloadParentContinuation(MacroAssembler* masm, Register wasm_instance,
                       wasm::JumpBuffer::Retired);

   Register parent = tmp2;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       parent,
       FieldOperand(active_continuation, WasmContinuationObject::kParentOffset));

@@ -3083,7 +3076,7 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1,
   __ StoreTaggedSignedField(
       FieldOperand(suspender, WasmSuspenderObject::kStateOffset),
       Smi::FromInt(WasmSuspenderObject::kInactive));
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       suspender, FieldOperand(suspender, WasmSuspenderObject::kParentOffset));
   __ CompareRoot(suspender, RootIndex::kUndefinedValue);
   Label undefined;
@@ -3111,19 +3104,19 @@ void LoadFunctionDataAndWasmInstance(MacroAssembler* masm,
                                      Register wasm_instance) {
   Register closure = function_data;
   Register shared_function_info = closure;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       shared_function_info,
       MemOperand(
           closure,
           wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()));
   closure = no_reg;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       function_data,
       MemOperand(shared_function_info,
                  SharedFunctionInfo::kFunctionDataOffset - kHeapObjectTag));
   shared_function_info = no_reg;

-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       wasm_instance,
       MemOperand(function_data,
                  WasmExportedFunctionData::kInstanceOffset - kHeapObjectTag));
@@ -3224,7 +3217,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
     Register suspender = rax;  // Fixed.
     __ movq(MemOperand(rbp, kSuspenderOffset), suspender);
     Register target_continuation = rax;
-    __ LoadAnyTaggedField(
+    __ LoadTaggedField(
         target_continuation,
         FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset));
     suspender = no_reg;
@@ -3728,7 +3721,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {

   Register function_entry = function_data;
   Register scratch = r12;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       function_entry,
       FieldOperand(function_data, WasmExportedFunctionData::kInternalOffset));
   __ LoadExternalPointerField(
@@ -4081,7 +4074,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
   // live: [rax, rbx, rcx]

   Register suspender_continuation = rdx;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       suspender_continuation,
       FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset));
 #ifdef DEBUG
@@ -4102,12 +4095,12 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
   // Update roots.
   // -------------------------------------------
   Register caller = rcx;
-  __ LoadAnyTaggedField(caller,
+  __ LoadTaggedField(caller,
                      FieldOperand(suspender_continuation,
                                   WasmContinuationObject::kParentOffset));
   __ movq(masm->RootAsOperand(RootIndex::kActiveContinuation), caller);
   Register parent = rdx;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       parent, FieldOperand(suspender, WasmSuspenderObject::kParentOffset));
   __ movq(masm->RootAsOperand(RootIndex::kActiveSuspender), parent);
   parent = no_reg;
@@ -4172,19 +4165,19 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
   // Load suspender from closure.
   // -------------------------------------------
   Register sfi = closure;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       sfi,
       MemOperand(
           closure,
           wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()));
   Register function_data = sfi;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       function_data,
       FieldOperand(sfi, SharedFunctionInfo::kFunctionDataOffset));
   // The write barrier uses a fixed register for the host object (rdi). The next
   // barrier is on the suspender, so load it in rdi directly.
   Register suspender = rdi;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       suspender, FieldOperand(function_data, WasmResumeData::kSuspenderOffset));
   // Check the suspender state.
   Label suspender_is_suspended;
@@ -4233,7 +4226,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
   __ movq(masm->RootAsOperand(RootIndex::kActiveSuspender), suspender);

   Register target_continuation = suspender;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       target_continuation,
       FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset));
   suspender = no_reg;
@@ -4848,16 +4841,16 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
   // Insert additional parameters into the stack frame above return address.
   __ PopReturnAddressTo(scratch);
   __ Push(receiver);
-  __ PushTaggedAnyField(FieldOperand(callback, AccessorInfo::kDataOffset),
+  __ PushTaggedField(FieldOperand(callback, AccessorInfo::kDataOffset),
                      decompr_scratch1);
   __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
   __ Push(kScratchRegister);  // return value
   __ Push(kScratchRegister);  // return value default
   __ PushAddress(ExternalReference::isolate_address(masm->isolate()));
   __ Push(holder);
   __ Push(Smi::zero());  // should_throw_on_error -> false
-  __ PushTaggedPointerField(FieldOperand(callback, AccessorInfo::kNameOffset),
+  __ PushTaggedField(FieldOperand(callback, AccessorInfo::kNameOffset),
                      decompr_scratch1);
   __ PushReturnAddressFrom(scratch);

   // v8::PropertyCallbackInfo::args_ array and name handle.
@@ -5129,12 +5122,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   // Get the InstructionStream object from the shared function info.
   Register code_obj = rbx;
   TaggedRegister shared_function_info(code_obj);
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       shared_function_info,
       FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
-      code_obj, FieldOperand(shared_function_info,
+  __ LoadTaggedField(code_obj,
+                     FieldOperand(shared_function_info,
                               SharedFunctionInfo::kFunctionDataOffset));

   // Check if we have baseline code. For OSR entry it is safe to assume we
   // always have baseline code.
@@ -5166,10 +5159,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   Register feedback_vector = r11;

   TaggedRegister feedback_cell(feedback_vector);
-  __ LoadTaggedPointerField(
-      feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(feedback_vector,
+  __ LoadTaggedField(feedback_cell,
+                     FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(feedback_vector,
                      FieldOperand(feedback_cell, Cell::kValueOffset));

   Label install_baseline_code;
   // Check if feedback vector is valid. If not, call prepare for baseline to
@@ -659,8 +659,8 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
     Tagged_t compressed =
         Assembler::target_compressed_address_at(pc_, constant_pool_);
     DCHECK(!HAS_SMI_TAG(compressed));
-    Object obj(V8HeapCompressionScheme::DecompressTaggedPointer(cage_base,
-                                                                compressed));
+    Object obj(
+        V8HeapCompressionScheme::DecompressTagged(cage_base, compressed));
     // Embedding of compressed InstructionStream objects must not happen when
     // external code space is enabled, because Codes must be used
     // instead.
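For orientation, the unified DecompressTagged that replaces both DecompressTaggedPointer and DecompressTaggedAny is simply "zero-extend the 32-bit on-heap value, then rebase it into the pointer cage". A minimal standalone sketch, assuming a 4GB-aligned cage base (names are illustrative, not the real V8HeapCompressionScheme):

    #include <cstdint>

    using Address = uintptr_t;
    using Tagged_t = uint32_t;

    // Sketch of the unified decompression: one formula for HeapObjects and
    // Smis alike. The DCHECK(!HAS_SMI_TAG(...)) above still holds for
    // compressed embedded objects; it is the decompression itself that no
    // longer cares about the tag.
    Address DecompressTaggedSketch(Address cage_base, Tagged_t value) {
      // cage_base is assumed 4GB-aligned, so the add never carries into it.
      return cage_base + Address{value};
    }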
@@ -1522,10 +1522,9 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(

   bind(&maybe_has_optimized_code);
   Register optimized_code_entry = x7;
-  LoadAnyTaggedField(
-      optimized_code_entry,
-      FieldMemOperand(feedback_vector,
-                      FeedbackVector::kMaybeOptimizedCodeOffset));
+  LoadTaggedField(optimized_code_entry,
+                  FieldMemOperand(feedback_vector,
+                                  FeedbackVector::kMaybeOptimizedCodeOffset));
   TailCallOptimizedCodeSlot(this, optimized_code_entry, x4);
 }

@@ -1844,7 +1843,7 @@ void MacroAssembler::LoadTaggedRoot(Register destination, RootIndex index) {
 void MacroAssembler::LoadRoot(Register destination, RootIndex index) {
   ASM_CODE_COMMENT(this);
   // TODO(v8:13466, olivf): With static roots we could use
-  // DecompressTaggedPointer here. However, currently all roots have addresses
+  // DecompressTagged here. However, currently all roots have addresses
   // that are too large to fit into addition immediate operands. Evidence
   // suggests that the extra instruction for decompression costs us more than
   // the load.
@@ -2061,9 +2060,9 @@ void MacroAssembler::LoadFromConstantsTable(Register destination,
   ASM_CODE_COMMENT(this);
   DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
   LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
-  LoadTaggedPointerField(
-      destination, FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
-                                                    constant_index)));
+  LoadTaggedField(destination,
+                  FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
+                                                   constant_index)));
 }

 void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
@@ -2449,8 +2448,8 @@ void MacroAssembler::BailoutIfDeoptimized() {
   UseScratchRegisterScope temps(this);
   Register scratch = temps.AcquireX();
   int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
-  LoadTaggedPointerField(scratch,
+  LoadTaggedField(scratch,
                   MemOperand(kJavaScriptCallCodeStartRegister, offset));
   Ldr(scratch.W(), FieldMemOperand(scratch, Code::kKindSpecificFlagsOffset));
   Label not_deoptimized;
   Tbz(scratch.W(), InstructionStream::kMarkedForDeoptimizationBit,
@@ -2663,8 +2662,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
   // allow recompilation to take effect without changing any of the
   // call sites.
   Register code = kJavaScriptCallCodeStartRegister;
-  LoadTaggedPointerField(code,
-                         FieldMemOperand(function, JSFunction::kCodeOffset));
+  LoadTaggedField(code, FieldMemOperand(function, JSFunction::kCodeOffset));
   switch (type) {
     case InvokeType::kCall:
       CallCodeObject(code);
@@ -2715,12 +2713,11 @@ void MacroAssembler::InvokeFunctionWithNewTarget(

   Register expected_parameter_count = x2;

-  LoadTaggedPointerField(cp,
-                         FieldMemOperand(function, JSFunction::kContextOffset));
+  LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
   // The number of arguments is stored as an int32_t, and -1 is a marker
   // (kDontAdaptArgumentsSentinel), so we need sign
   // extension to correctly handle it.
-  LoadTaggedPointerField(
+  LoadTaggedField(
       expected_parameter_count,
       FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
   Ldrh(expected_parameter_count,
@@ -2744,8 +2741,7 @@ void MacroAssembler::InvokeFunction(Register function,
   DCHECK_EQ(function, x1);

   // Set up the context.
-  LoadTaggedPointerField(cp,
-                         FieldMemOperand(function, JSFunction::kContextOffset));
+  LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));

   InvokeFunctionCode(function, no_reg, expected_parameter_count,
                      actual_parameter_count, type);
@@ -3012,7 +3008,7 @@ void MacroAssembler::CompareObjectType(Register object, Register map,

 void MacroAssembler::LoadMap(Register dst, Register object) {
   ASM_CODE_COMMENT(this);
-  LoadTaggedPointerField(dst, FieldMemOperand(object, HeapObject::kMapOffset));
+  LoadTaggedField(dst, FieldMemOperand(object, HeapObject::kMapOffset));
 }

 // Sets condition flags based on comparison, and returns type in type_reg.
@@ -3086,19 +3082,10 @@ void MacroAssembler::JumpIfIsInRange(const Register& value,
   }
 }

-void MacroAssembler::LoadTaggedPointerField(const Register& destination,
+void MacroAssembler::LoadTaggedField(const Register& destination,
                                      const MemOperand& field_operand) {
   if (COMPRESS_POINTERS_BOOL) {
-    DecompressTaggedPointer(destination, field_operand);
-  } else {
-    Ldr(destination, field_operand);
-  }
-}
-
-void MacroAssembler::LoadAnyTaggedField(const Register& destination,
-                                        const MemOperand& field_operand) {
-  if (COMPRESS_POINTERS_BOOL) {
-    DecompressAnyTagged(destination, field_operand);
+    DecompressTagged(destination, field_operand);
   } else {
     Ldr(destination, field_operand);
   }
@@ -3149,21 +3136,21 @@ void MacroAssembler::DecompressTaggedSigned(const Register& destination,
   }
 }

-void MacroAssembler::DecompressTaggedPointer(const Register& destination,
+void MacroAssembler::DecompressTagged(const Register& destination,
                                       const MemOperand& field_operand) {
   ASM_CODE_COMMENT(this);
   Ldr(destination.W(), field_operand);
   Add(destination, kPtrComprCageBaseRegister, destination);
 }

-void MacroAssembler::DecompressTaggedPointer(const Register& destination,
+void MacroAssembler::DecompressTagged(const Register& destination,
                                       const Register& source) {
   ASM_CODE_COMMENT(this);
   Add(destination, kPtrComprCageBaseRegister, Operand(source, UXTW));
 }

-void MacroAssembler::DecompressTaggedPointer(const Register& destination,
+void MacroAssembler::DecompressTagged(const Register& destination,
                                       Tagged_t immediate) {
   ASM_CODE_COMMENT(this);
   if (IsImmAddSub(immediate)) {
     Add(destination, kPtrComprCageBaseRegister,
@@ -3178,13 +3165,6 @@ void MacroAssembler::DecompressTagged(const Register& destination,
   }
 }

-void MacroAssembler::DecompressAnyTagged(const Register& destination,
-                                         const MemOperand& field_operand) {
-  ASM_CODE_COMMENT(this);
-  Ldr(destination.W(), field_operand);
-  Add(destination, kPtrComprCageBaseRegister, destination);
-}
-
 void MacroAssembler::AtomicDecompressTaggedSigned(const Register& destination,
                                                   const Register& base,
                                                   const Register& index,
@@ -3199,20 +3179,10 @@ void MacroAssembler::AtomicDecompressTaggedSigned(const Register& destination,
   }
 }

-void MacroAssembler::AtomicDecompressTaggedPointer(const Register& destination,
+void MacroAssembler::AtomicDecompressTagged(const Register& destination,
                                             const Register& base,
                                             const Register& index,
                                             const Register& temp) {
-  ASM_CODE_COMMENT(this);
-  Add(temp, base, index);
-  Ldar(destination.W(), temp);
-  Add(destination, kPtrComprCageBaseRegister, destination);
-}
-
-void MacroAssembler::AtomicDecompressAnyTagged(const Register& destination,
-                                               const Register& base,
-                                               const Register& index,
-                                               const Register& temp) {
   ASM_CODE_COMMENT(this);
   Add(temp, base, index);
   Ldar(destination.W(), temp);
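The atomic variants collapse for the same reason: the only atomic part is the acquire-ordered 32-bit load (the Ldar above), and the cage-base add can follow non-atomically because the base is constant for the process. A C++-level sketch of the sequence (illustrative, not the V8 implementation):

    #include <atomic>
    #include <cstdint>

    using Address = uintptr_t;

    // Sketch of AtomicDecompressTagged (the Ldar + Add pair above):
    // acquire-load the compressed slot, then rebase. No atomicity is
    // needed for the add itself.
    Address AtomicDecompressTaggedSketch(Address cage_base,
                                         const std::atomic<uint32_t>* slot) {
      uint32_t compressed = slot->load(std::memory_order_acquire);
      return cage_base + compressed;
    }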
@@ -3465,7 +3435,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
     Register temp = temps.AcquireX();
     DCHECK(!AreAliased(object, value, temp));
     Add(temp, object, offset);
-    LoadTaggedPointerField(temp, MemOperand(temp));
+    LoadTaggedField(temp, MemOperand(temp));
     Cmp(temp, value);
     Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
   }
@@ -3572,10 +3542,10 @@ void MacroAssembler::Abort(AbortReason reason) {

 void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
   LoadMap(dst, cp);
-  LoadTaggedPointerField(
+  LoadTaggedField(
       dst, FieldMemOperand(
                dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
-  LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
+  LoadTaggedField(dst, MemOperand(dst, Context::SlotOffset(index)));
 }

 void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
@@ -3584,7 +3554,7 @@ void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                              Label* on_result,
                                              Label::Distance) {
   Label fallthrough, clear_slot;
-  LoadTaggedPointerField(
+  LoadTaggedField(
       scratch_and_result,
       FieldMemOperand(feedback_vector,
                       FeedbackVector::OffsetOfElementAt(slot.ToInt())));
@@ -1400,14 +1400,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
   // ---------------------------------------------------------------------------
   // Pointer compression Support

-  // Loads a field containing a HeapObject and decompresses it if pointer
-  // compression is enabled.
-  void LoadTaggedPointerField(const Register& destination,
-                              const MemOperand& field_operand);
-
   // Loads a field containing any tagged value and decompresses it if necessary.
-  void LoadAnyTaggedField(const Register& destination,
+  void LoadTaggedField(const Register& destination,
                        const MemOperand& field_operand);

   // Loads a field containing a tagged signed value and decompresses it if
   // necessary.
@@ -1432,24 +1427,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {

   void DecompressTaggedSigned(const Register& destination,
                               const MemOperand& field_operand);
-  void DecompressTaggedPointer(const Register& destination,
+  void DecompressTagged(const Register& destination,
                         const MemOperand& field_operand);
-  void DecompressTaggedPointer(const Register& destination,
-                               const Register& source);
-  void DecompressTaggedPointer(const Register& destination, Tagged_t immediate);
-  void DecompressAnyTagged(const Register& destination,
-                           const MemOperand& field_operand);
+  void DecompressTagged(const Register& destination, const Register& source);
+  void DecompressTagged(const Register& destination, Tagged_t immediate);

   void AtomicDecompressTaggedSigned(const Register& destination,
                                     const Register& base, const Register& index,
                                     const Register& temp);
-  void AtomicDecompressTaggedPointer(const Register& destination,
-                                     const Register& base,
-                                     const Register& index,
-                                     const Register& temp);
-  void AtomicDecompressAnyTagged(const Register& destination,
-                                 const Register& base, const Register& index,
-                                 const Register& temp);
+  void AtomicDecompressTagged(const Register& destination, const Register& base,
+                              const Register& index, const Register& temp);

   // Restore FP and LR from the values stored in the current frame. This will
   // authenticate the LR when pointer authentication is enabled.
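The surviving comment ("Loads a field containing any tagged value...") is now literally true of the one remaining helper: since Smi-corrupting loads were introduced, a decompressed Smi may carry cage-base bits in its upper half, and that is fine because Smi consumers only look at the low 32 bits. A standalone sketch of why (the shift/tag constants follow the usual compressed-pointer Smi layout and are an assumption here, not taken from this diff):

    #include <cassert>
    #include <cstdint>

    using Address = uintptr_t;

    // Sketch: recover a Smi from a "corrupted" decompressed word. Only the
    // low 32 bits matter; the payload is value << 1 with tag bit 0 clear.
    int32_t SmiValueSketch(Address decompressed) {
      return static_cast<int32_t>(static_cast<uint32_t>(decompressed)) >> 1;
    }

    int main() {
      Address cage_base = Address{0x1234} << 32;  // made-up, 4GB-aligned
      uint32_t compressed_smi = 42u << 1;         // Smi 42, tag bit clear
      Address raw = cage_base + compressed_smi;   // upper 32 bits "wrong"
      assert(SmiValueSketch(raw) == 42);          // ...and it does not matter
      return 0;
    }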
@@ -148,7 +148,7 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc,
 HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
   DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
   if (IsCompressedEmbeddedObject(rmode_)) {
-    return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny(
+    return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTagged(
         cage_base,
         Assembler::target_compressed_address_at(pc_, constant_pool_))));
   } else {
@@ -120,11 +120,10 @@ void MacroAssembler::LoadFromConstantsTable(Register destination,

   DCHECK_NE(destination, r0);
   LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
-  LoadTaggedPointerField(
-      destination,
-      FieldMemOperand(destination,
-                      FixedArray::OffsetOfElementAt(constant_index)),
-      r0);
+  LoadTaggedField(destination,
+                  FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
+                                                   constant_index)),
+                  r0);
 }

 void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
@@ -624,28 +623,18 @@ void MacroAssembler::LoadRoot(Register destination, RootIndex index,
                               Condition cond) {
   DCHECK(cond == al);
   if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) {
-    DecompressTaggedPointer(destination, ReadOnlyRootPtr(index));
+    DecompressTagged(destination, ReadOnlyRootPtr(index));
     return;
   }
   LoadU64(destination,
           MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
 }

-void MacroAssembler::LoadTaggedPointerField(const Register& destination,
+void MacroAssembler::LoadTaggedField(const Register& destination,
                                      const MemOperand& field_operand,
                                      const Register& scratch) {
   if (COMPRESS_POINTERS_BOOL) {
-    DecompressTaggedPointer(destination, field_operand);
-  } else {
-    LoadU64(destination, field_operand, scratch);
-  }
-}
-
-void MacroAssembler::LoadAnyTaggedField(const Register& destination,
-                                        const MemOperand& field_operand,
-                                        const Register& scratch) {
-  if (COMPRESS_POINTERS_BOOL) {
-    DecompressAnyTagged(destination, field_operand);
+    DecompressTagged(destination, field_operand);
   } else {
     LoadU64(destination, field_operand, scratch);
   }
@@ -688,45 +677,28 @@ void MacroAssembler::DecompressTaggedSigned(Register destination,
   RecordComment("]");
 }

-void MacroAssembler::DecompressTaggedPointer(Register destination,
-                                             Register source) {
-  RecordComment("[ DecompressTaggedPointer");
+void MacroAssembler::DecompressTagged(Register destination, Register source) {
+  RecordComment("[ DecompressTagged");
   ZeroExtWord32(destination, source);
   add(destination, destination, kPtrComprCageBaseRegister);
   RecordComment("]");
 }

-void MacroAssembler::DecompressTaggedPointer(Register destination,
+void MacroAssembler::DecompressTagged(Register destination,
                                       MemOperand field_operand) {
-  RecordComment("[ DecompressTaggedPointer");
+  RecordComment("[ DecompressTagged");
   LoadU32(destination, field_operand, r0);
   add(destination, destination, kPtrComprCageBaseRegister);
   RecordComment("]");
 }

-void MacroAssembler::DecompressTaggedPointer(const Register& destination,
+void MacroAssembler::DecompressTagged(const Register& destination,
                                       Tagged_t immediate) {
   ASM_CODE_COMMENT(this);
   AddS64(destination, kPtrComprCageBaseRegister,
          Operand(immediate, RelocInfo::Mode::NO_INFO));
 }

-void MacroAssembler::DecompressAnyTagged(Register destination,
-                                         MemOperand field_operand) {
-  RecordComment("[ DecompressAnyTagged");
-  LoadU32(destination, field_operand, r0);
-  add(destination, destination, kPtrComprCageBaseRegister);
-  RecordComment("]");
-}
-
-void MacroAssembler::DecompressAnyTagged(Register destination,
-                                         Register source) {
-  RecordComment("[ DecompressAnyTagged");
-  ZeroExtWord32(destination, source);
-  add(destination, destination, kPtrComprCageBaseRegister);
-  RecordComment("]");
-}
-
 void MacroAssembler::LoadTaggedSignedField(Register destination,
                                            MemOperand field_operand,
                                            Register scratch) {
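Note how each back-end spells the same zero-extension: ZeroExtWord32 here on ppc, an Operand(source, UXTW) on arm64, And(..., 0xFFFFFFFF) on riscv. All three compute identical values, which is what makes a single DecompressTagged viable everywhere; a quick standalone equivalence check (constants made up for illustration):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t source = 0xDEADBEEFCAFED00Dull;  // upper half is garbage
      uint64_t cage_base = 0x5678ull << 32;     // made-up, 4GB-aligned base
      uint64_t a = cage_base + static_cast<uint32_t>(source);  // ZeroExtWord32
      uint64_t b = cage_base + (source & 0xFFFFFFFFull);       // And mask
      assert(a == b);  // UXTW yields the same zero-extended value again
      return 0;
    }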
@@ -861,7 +833,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
                                  SaveFPRegsMode fp_mode, SmiCheck smi_check) {
   DCHECK(!AreAliased(object, value, slot_address));
   if (v8_flags.debug_code) {
-    LoadTaggedPointerField(r0, MemOperand(slot_address));
+    LoadTaggedField(r0, MemOperand(slot_address));
     CmpS64(r0, value);
     Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
   }
@@ -1645,8 +1617,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
   // allow recompilation to take effect without changing any of the
   // call sites.
   Register code = kJavaScriptCallCodeStartRegister;
-  LoadTaggedPointerField(
-      code, FieldMemOperand(function, JSFunction::kCodeOffset), r0);
+  LoadTaggedField(code, FieldMemOperand(function, JSFunction::kCodeOffset), r0);
   switch (type) {
     case InvokeType::kCall:
       CallCodeObject(code);
@@ -1673,10 +1644,9 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
   Register expected_reg = r5;
   Register temp_reg = r7;

-  LoadTaggedPointerField(
+  LoadTaggedField(
       temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
-  LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset),
-                         r0);
+  LoadTaggedField(cp, FieldMemOperand(r4, JSFunction::kContextOffset), r0);
   LoadU16(expected_reg,
           FieldMemOperand(temp_reg,
                           SharedFunctionInfo::kFormalParameterCountOffset));
@@ -1696,8 +1666,7 @@ void MacroAssembler::InvokeFunction(Register function,
   DCHECK_EQ(function, r4);

   // Get the function and setup the context.
-  LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset),
-                         r0);
+  LoadTaggedField(cp, FieldMemOperand(r4, JSFunction::kContextOffset), r0);

   InvokeFunctionCode(r4, no_reg, expected_parameter_count,
                      actual_parameter_count, type);
@@ -2163,10 +2132,10 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(

   bind(&maybe_has_optimized_code);
   Register optimized_code_entry = flags;
-  LoadAnyTaggedField(optimized_code_entry,
+  LoadTaggedField(optimized_code_entry,
                   FieldMemOperand(feedback_vector,
                                   FeedbackVector::kMaybeOptimizedCodeOffset),
                   r0);
   TailCallOptimizedCodeSlot(this, optimized_code_entry, r9);
 }

@@ -2307,17 +2276,17 @@ void MacroAssembler::Abort(AbortReason reason) {
 }

 void MacroAssembler::LoadMap(Register destination, Register object) {
-  LoadTaggedPointerField(destination,
-                         FieldMemOperand(object, HeapObject::kMapOffset), r0);
+  LoadTaggedField(destination, FieldMemOperand(object, HeapObject::kMapOffset),
+                  r0);
 }

 void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
   LoadMap(dst, cp);
-  LoadTaggedPointerField(
+  LoadTaggedField(
       dst,
       FieldMemOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset),
       r0);
-  LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)), r0);
+  LoadTaggedField(dst, MemOperand(dst, Context::SlotOffset(index)), r0);
 }

 #ifdef V8_ENABLE_DEBUG_CODE
@@ -1010,19 +1010,13 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
 #endif
   }

-  // Loads a field containing a HeapObject and decompresses it if pointer
-  // compression is enabled.
-  void LoadTaggedPointerField(const Register& destination,
-                              const MemOperand& field_operand,
-                              const Register& scratch = no_reg);
+  // Loads a field containing any tagged value and decompresses it if necessary.
+  void LoadTaggedField(const Register& destination,
+                       const MemOperand& field_operand,
+                       const Register& scratch = no_reg);
   void LoadTaggedSignedField(Register destination, MemOperand field_operand,
                              Register scratch);

-  // Loads a field containing any tagged value and decompresses it if necessary.
-  void LoadAnyTaggedField(const Register& destination,
-                          const MemOperand& field_operand,
-                          const Register& scratch = no_reg);
-
   // Compresses and stores tagged value to given on-heap location.
   void StoreTaggedField(const Register& value,
                         const MemOperand& dst_field_operand,
@@ -1030,11 +1024,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {

   void DecompressTaggedSigned(Register destination, MemOperand field_operand);
   void DecompressTaggedSigned(Register destination, Register src);
-  void DecompressTaggedPointer(Register destination, MemOperand field_operand);
-  void DecompressTaggedPointer(Register destination, Register source);
-  void DecompressTaggedPointer(const Register& destination, Tagged_t immediate);
-  void DecompressAnyTagged(Register destination, MemOperand field_operand);
-  void DecompressAnyTagged(Register destination, Register source);
+  void DecompressTagged(Register destination, MemOperand field_operand);
+  void DecompressTagged(Register destination, Register source);
+  void DecompressTagged(const Register& destination, Tagged_t immediate);

   void LoadF64(DoubleRegister dst, const MemOperand& mem,
                Register scratch = no_reg);
@@ -162,7 +162,7 @@ void Assembler::deserialization_set_target_internal_reference_at(
 HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
   DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
   if (IsCompressedEmbeddedObject(rmode_)) {
-    return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny(
+    return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTagged(
         cage_base,
         Assembler::target_compressed_address_at(pc_, constant_pool_))));
   } else {
@@ -238,10 +238,9 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(

   bind(&maybe_has_optimized_code);
   Register optimized_code_entry = flags;
-  LoadAnyTaggedField(
-      optimized_code_entry,
-      FieldMemOperand(feedback_vector,
-                      FeedbackVector::kMaybeOptimizedCodeOffset));
+  LoadTaggedField(optimized_code_entry,
+                  FieldMemOperand(feedback_vector,
+                                  FeedbackVector::kMaybeOptimizedCodeOffset));
   TailCallOptimizedCodeSlot(this, optimized_code_entry, temps.Acquire(),
                             temps.Acquire());
 }
@@ -413,7 +412,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
     Register temp = temps.Acquire();
     DCHECK(!AreAliased(object, value, temp));
     AddWord(temp, object, offset);
-    LoadTaggedPointerField(temp, MemOperand(temp));
+    LoadTaggedField(temp, MemOperand(temp));
     Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, temp,
            Operand(value));
   }
@@ -4216,9 +4215,9 @@ void MacroAssembler::LoadFromConstantsTable(Register destination,
                                             int constant_index) {
   DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
   LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
-  LoadTaggedPointerField(
-      destination, FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
-                                                    constant_index)));
+  LoadTaggedField(destination,
+                  FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
+                                                   constant_index)));
 }

 void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
@@ -4916,8 +4915,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
   // allow recompilation to take effect without changing any of the
   // call sites.
   Register code = kJavaScriptCallCodeStartRegister;
-  LoadTaggedPointerField(code,
-                         FieldMemOperand(function, JSFunction::kCodeOffset));
+  LoadTaggedField(code, FieldMemOperand(function, JSFunction::kCodeOffset));
   switch (type) {
     case InvokeType::kCall:
       CallCodeObject(code);
@@ -4944,11 +4942,10 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
   {
     UseScratchRegisterScope temps(this);
     Register temp_reg = temps.Acquire();
-    LoadTaggedPointerField(
+    LoadTaggedField(
         temp_reg,
         FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
-    LoadTaggedPointerField(
-        cp, FieldMemOperand(function, JSFunction::kContextOffset));
+    LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
     // The argument count is stored as uint16_t
     Lhu(expected_parameter_count,
         FieldMemOperand(temp_reg,
@@ -4969,7 +4966,7 @@ void MacroAssembler::InvokeFunction(Register function,
   DCHECK_EQ(function, a1);

   // Get the function and setup the context.
-  LoadTaggedPointerField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+  LoadTaggedField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));

   InvokeFunctionCode(a1, no_reg, expected_parameter_count,
                      actual_parameter_count, type);
@@ -5498,17 +5495,16 @@ void MacroAssembler::Abort(AbortReason reason) {

 void MacroAssembler::LoadMap(Register destination, Register object) {
   ASM_CODE_COMMENT(this);
-  LoadTaggedPointerField(destination,
-                         FieldMemOperand(object, HeapObject::kMapOffset));
+  LoadTaggedField(destination, FieldMemOperand(object, HeapObject::kMapOffset));
 }

 void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
   ASM_CODE_COMMENT(this);
   LoadMap(dst, cp);
-  LoadTaggedPointerField(
+  LoadTaggedField(
       dst, FieldMemOperand(
                dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
-  LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
+  LoadTaggedField(dst, MemOperand(dst, Context::SlotOffset(index)));
 }

 void MacroAssembler::StubPrologue(StackFrame::Type type) {
@@ -6186,19 +6182,10 @@ void MacroAssembler::JumpCodeObject(Register code, JumpMode jump_mode) {
 }

 #if V8_TARGET_ARCH_RISCV64
-void MacroAssembler::LoadTaggedPointerField(const Register& destination,
+void MacroAssembler::LoadTaggedField(const Register& destination,
                                      const MemOperand& field_operand) {
   if (COMPRESS_POINTERS_BOOL) {
-    DecompressTaggedPointer(destination, field_operand);
-  } else {
-    Ld(destination, field_operand);
-  }
-}
-
-void MacroAssembler::LoadAnyTaggedField(const Register& destination,
-                                        const MemOperand& field_operand) {
-  if (COMPRESS_POINTERS_BOOL) {
-    DecompressAnyTagged(destination, field_operand);
+    DecompressTagged(destination, field_operand);
   } else {
     Ld(destination, field_operand);
   }
@@ -6237,26 +6224,19 @@ void MacroAssembler::DecompressTaggedSigned(const Register& destination,
   }
 }

-void MacroAssembler::DecompressTaggedPointer(const Register& destination,
+void MacroAssembler::DecompressTagged(const Register& destination,
                                       const MemOperand& field_operand) {
   ASM_CODE_COMMENT(this);
   Lwu(destination, field_operand);
   AddWord(destination, kPtrComprCageBaseRegister, destination);
 }

-void MacroAssembler::DecompressTaggedPointer(const Register& destination,
+void MacroAssembler::DecompressTagged(const Register& destination,
                                       const Register& source) {
   ASM_CODE_COMMENT(this);
   And(destination, source, Operand(0xFFFFFFFF));
   AddWord(destination, kPtrComprCageBaseRegister, Operand(destination));
|
AddWord(destination, kPtrComprCageBaseRegister, Operand(destination));
|
||||||
}
|
}
|
||||||
|
|
||||||
void MacroAssembler::DecompressAnyTagged(const Register& destination,
|
|
||||||
const MemOperand& field_operand) {
|
|
||||||
ASM_CODE_COMMENT(this);
|
|
||||||
Lwu(destination, field_operand);
|
|
||||||
AddWord(destination, kPtrComprCageBaseRegister, destination);
|
|
||||||
}
|
|
||||||
#endif
|
#endif
|
||||||
void MacroAssembler::DropArguments(Register count, ArgumentsCountType type,
|
void MacroAssembler::DropArguments(Register count, ArgumentsCountType type,
|
||||||
ArgumentsCountMode mode, Register scratch) {
|
ArgumentsCountMode mode, Register scratch) {
|
||||||
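
The merged RISCV64 path above is a zero-extending 32-bit load (`Lwu`) followed by an add of the cage base register. A minimal C++ model of that behavior, with illustrative names and types rather than the V8 API:

#include <cstdint>

using Address = uintptr_t;
using Tagged_t = uint32_t;

// Illustrative only: one decompression routine now serves both HeapObject
// slots and arbitrary tagged (possibly Smi) slots, because the upper 32 bits
// of an uncompressed Smi are allowed to be undefined.
static Address DecompressTagged(Address cage_base, Tagged_t compressed) {
  // Zero-extend the 32-bit slot value and rebase it inside the cage.
  return cage_base + static_cast<Address>(compressed);
}

Since Smi-corrupting loads permit garbage in the upper half of an uncompressed Smi, applying the same rebase to a Smi slot is harmless, which is what lets the Pointer and Any variants collapse into one.
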
@@ -1072,14 +1072,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
   // ---------------------------------------------------------------------------
   // Pointer compression Support

-  // Loads a field containing a HeapObject and decompresses it if pointer
-  // compression is enabled.
-  void LoadTaggedPointerField(const Register& destination,
-                              const MemOperand& field_operand);
-
   // Loads a field containing any tagged value and decompresses it if necessary.
-  void LoadAnyTaggedField(const Register& destination,
-                          const MemOperand& field_operand);
+  void LoadTaggedField(const Register& destination,
+                       const MemOperand& field_operand);

   // Loads a field containing a tagged signed value and decompresses it if
   // necessary.
@@ -1095,12 +1090,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {

   void DecompressTaggedSigned(const Register& destination,
                               const MemOperand& field_operand);
-  void DecompressTaggedPointer(const Register& destination,
-                               const MemOperand& field_operand);
-  void DecompressTaggedPointer(const Register& destination,
-                               const Register& source);
-  void DecompressAnyTagged(const Register& destination,
-                           const MemOperand& field_operand);
+  void DecompressTagged(const Register& destination,
+                        const MemOperand& field_operand);
+  void DecompressTagged(const Register& destination, const Register& source);
   void CmpTagged(const Register& rd, const Register& rs1, const Register& rs2) {
     if (COMPRESS_POINTERS_BOOL) {
       Sub32(rd, rs1, rs2);
@@ -1113,12 +1105,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
   // Pointer compression Support
   // rv32 don't support Pointer compression. Defines these functions for
   // simplify builtins.
-  inline void LoadTaggedPointerField(const Register& destination,
-                                     const MemOperand& field_operand) {
-    Lw(destination, field_operand);
-  }
-  inline void LoadAnyTaggedField(const Register& destination,
-                                 const MemOperand& field_operand) {
+  inline void LoadTaggedField(const Register& destination,
+                              const MemOperand& field_operand) {
     Lw(destination, field_operand);
   }
   inline void LoadTaggedSignedField(const Register& destination,
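
On rv32, where pointer compression is unavailable, the merged helper is a plain word load, so the only interesting case is the 64-bit one. A hedged sketch of why the unified decompression is safe for Smi slots (31-bit Smis with a low tag bit assumed; the helper name is hypothetical, not from the V8 sources):

#include <cstdint>

using Address = uintptr_t;

// Hypothetical helper: a Smi read through the unified DecompressTagged path
// carries cage-base bits in its upper half, but consumers only interpret the
// low 32 bits, so the Smi value survives intact.
static int32_t SmiValueAfterUnifiedDecompress(Address decompressed) {
  // Assumes SmiValuesAre31Bits: value lives in bits 1..31, tag bit 0 is 0.
  return static_cast<int32_t>(static_cast<uint32_t>(decompressed)) >> 1;
}
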
@@ -142,7 +142,7 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
 HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
   DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
   if (IsCompressedEmbeddedObject(rmode_)) {
-    return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny(
+    return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTagged(
         cage_base,
         Assembler::target_compressed_address_at(pc_, constant_pool_))));
   } else {
@@ -343,11 +343,10 @@ void MacroAssembler::LoadFromConstantsTable(Register destination,
   CHECK(is_uint19(offset));
   DCHECK_NE(destination, r0);
   LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
-  LoadTaggedPointerField(
-      destination,
-      FieldMemOperand(destination,
-                      FixedArray::OffsetOfElementAt(constant_index)),
-      r1);
+  LoadTaggedField(destination,
+                  FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
+                                      constant_index)),
+                  r1);
 }

 void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
@@ -862,28 +861,18 @@ void MacroAssembler::LoadTaggedRoot(Register destination, RootIndex index) {
 void MacroAssembler::LoadRoot(Register destination, RootIndex index,
                               Condition) {
   if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) {
-    DecompressTaggedPointer(destination, ReadOnlyRootPtr(index));
+    DecompressTagged(destination, ReadOnlyRootPtr(index));
     return;
   }
   LoadU64(destination,
           MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
 }

-void MacroAssembler::LoadTaggedPointerField(const Register& destination,
-                                            const MemOperand& field_operand,
-                                            const Register& scratch) {
-  if (COMPRESS_POINTERS_BOOL) {
-    DecompressTaggedPointer(destination, field_operand);
-  } else {
-    LoadU64(destination, field_operand, scratch);
-  }
-}
-
-void MacroAssembler::LoadAnyTaggedField(const Register& destination,
-                                        const MemOperand& field_operand,
-                                        const Register& scratch) {
-  if (COMPRESS_POINTERS_BOOL) {
-    DecompressAnyTagged(destination, field_operand);
+void MacroAssembler::LoadTaggedField(const Register& destination,
+                                     const MemOperand& field_operand,
+                                     const Register& scratch) {
+  if (COMPRESS_POINTERS_BOOL) {
+    DecompressTagged(destination, field_operand);
   } else {
     LoadU64(destination, field_operand, scratch);
   }
@@ -928,45 +917,28 @@ void MacroAssembler::DecompressTaggedSigned(Register destination,
   RecordComment("]");
 }

-void MacroAssembler::DecompressTaggedPointer(Register destination,
-                                             Register source) {
-  RecordComment("[ DecompressTaggedPointer");
+void MacroAssembler::DecompressTagged(Register destination, Register source) {
+  RecordComment("[ DecompressTagged");
   llgfr(destination, source);
   agr(destination, kRootRegister);
   RecordComment("]");
 }

-void MacroAssembler::DecompressTaggedPointer(Register destination,
-                                             MemOperand field_operand) {
-  RecordComment("[ DecompressTaggedPointer");
+void MacroAssembler::DecompressTagged(Register destination,
+                                      MemOperand field_operand) {
+  RecordComment("[ DecompressTagged");
   llgf(destination, field_operand);
   agr(destination, kRootRegister);
   RecordComment("]");
 }

-void MacroAssembler::DecompressTaggedPointer(const Register& destination,
-                                             Tagged_t immediate) {
+void MacroAssembler::DecompressTagged(const Register& destination,
+                                      Tagged_t immediate) {
   ASM_CODE_COMMENT(this);
   mov(destination, Operand(immediate, RelocInfo::NO_INFO));
   agr(destination, kRootRegister);
 }

-void MacroAssembler::DecompressAnyTagged(Register destination,
-                                         MemOperand field_operand) {
-  RecordComment("[ DecompressAnyTagged");
-  llgf(destination, field_operand);
-  agr(destination, kRootRegister);
-  RecordComment("]");
-}
-
-void MacroAssembler::DecompressAnyTagged(Register destination,
-                                         Register source) {
-  RecordComment("[ DecompressAnyTagged");
-  llgfr(destination, source);
-  agr(destination, kRootRegister);
-  RecordComment("]");
-}
-
 void MacroAssembler::LoadTaggedSignedField(Register destination,
                                            MemOperand field_operand) {
   if (COMPRESS_POINTERS_BOOL) {
@@ -1099,7 +1071,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
                                  SaveFPRegsMode fp_mode, SmiCheck smi_check) {
   DCHECK(!AreAliased(object, slot_address, value));
   if (v8_flags.debug_code) {
-    LoadTaggedPointerField(r0, MemOperand(slot_address));
+    LoadTaggedField(r0, MemOperand(slot_address));
     CmpS64(value, r0);
     Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
   }
@@ -1827,8 +1799,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
   // allow recompilation to take effect without changing any of the
   // call sites.
   Register code = kJavaScriptCallCodeStartRegister;
-  LoadTaggedPointerField(code,
-                         FieldMemOperand(function, JSFunction::kCodeOffset));
+  LoadTaggedField(code, FieldMemOperand(function, JSFunction::kCodeOffset));
   switch (type) {
     case InvokeType::kCall:
       CallCodeObject(code);
@@ -1853,9 +1824,9 @@ void MacroAssembler::InvokeFunctionWithNewTarget(

   Register expected_reg = r4;
   Register temp_reg = r6;
-  LoadTaggedPointerField(cp, FieldMemOperand(fun, JSFunction::kContextOffset));
-  LoadTaggedPointerField(
-      temp_reg, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
+  LoadTaggedField(cp, FieldMemOperand(fun, JSFunction::kContextOffset));
+  LoadTaggedField(temp_reg,
+                  FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
   LoadU16(
       expected_reg,
       FieldMemOperand(temp_reg,
@@ -1876,8 +1847,7 @@ void MacroAssembler::InvokeFunction(Register function,
   DCHECK_EQ(function, r3);

   // Get the function and setup the context.
-  LoadTaggedPointerField(cp,
-                         FieldMemOperand(function, JSFunction::kContextOffset));
+  LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));

   InvokeFunctionCode(r3, no_reg, expected_parameter_count,
                      actual_parameter_count, type);
@@ -2158,10 +2128,9 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(

   bind(&maybe_has_optimized_code);
   Register optimized_code_entry = flags;
-  LoadAnyTaggedField(
-      optimized_code_entry,
-      FieldMemOperand(feedback_vector,
-                      FeedbackVector::kMaybeOptimizedCodeOffset));
+  LoadTaggedField(optimized_code_entry,
+                  FieldMemOperand(feedback_vector,
+                                  FeedbackVector::kMaybeOptimizedCodeOffset));
   TailCallOptimizedCodeSlot(this, optimized_code_entry, r8);
 }

@@ -2301,16 +2270,15 @@ void MacroAssembler::Abort(AbortReason reason) {
 }

 void MacroAssembler::LoadMap(Register destination, Register object) {
-  LoadTaggedPointerField(destination,
-                         FieldMemOperand(object, HeapObject::kMapOffset));
+  LoadTaggedField(destination, FieldMemOperand(object, HeapObject::kMapOffset));
 }

 void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
   LoadMap(dst, cp);
-  LoadTaggedPointerField(
+  LoadTaggedField(
       dst, FieldMemOperand(
                dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
-  LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
+  LoadTaggedField(dst, MemOperand(dst, Context::SlotOffset(index)));
 }

 #ifdef V8_ENABLE_DEBUG_CODE
@@ -1464,17 +1464,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
 #endif
   }

-  // Loads a field containing a HeapObject and decompresses it if pointer
-  // compression is enabled.
-  void LoadTaggedPointerField(const Register& destination,
-                              const MemOperand& field_operand,
-                              const Register& scratch = no_reg);
-  void LoadTaggedSignedField(Register destination, MemOperand field_operand);
-
   // Loads a field containing any tagged value and decompresses it if necessary.
-  void LoadAnyTaggedField(const Register& destination,
-                          const MemOperand& field_operand,
-                          const Register& scratch = no_reg);
+  void LoadTaggedField(const Register& destination,
+                       const MemOperand& field_operand,
+                       const Register& scratch = no_reg);
+  void LoadTaggedSignedField(Register destination, MemOperand field_operand);

   // Loads a field containing smi value and untags it.
   void SmiUntagField(Register dst, const MemOperand& src);
@@ -1486,11 +1480,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {

   void DecompressTaggedSigned(Register destination, MemOperand field_operand);
   void DecompressTaggedSigned(Register destination, Register src);
-  void DecompressTaggedPointer(Register destination, MemOperand field_operand);
-  void DecompressTaggedPointer(Register destination, Register source);
-  void DecompressTaggedPointer(const Register& destination, Tagged_t immediate);
-  void DecompressAnyTagged(Register destination, MemOperand field_operand);
-  void DecompressAnyTagged(Register destination, Register source);
+  void DecompressTagged(Register destination, MemOperand field_operand);
+  void DecompressTagged(Register destination, Register source);
+  void DecompressTagged(const Register& destination, Tagged_t immediate);

   // CountLeadingZeros will corrupt the scratch register pair (eg. r0:r1)
   void CountLeadingZerosU32(Register dst, Register src,
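
On s390 the unified sequence is `llgf` (a zero-extending 32-bit load) or `llgfr` (a zero-extending register move) followed by `agr` with the root register. A small model of that arithmetic, using stand-in types rather than the V8 API:

#include <cstdint>

using Address = uint64_t;
using Tagged_t = uint32_t;

// Illustrative model of the s390 sequence: the same two instructions were
// emitted for the former TaggedPointer and AnyTagged variants, which is what
// makes merging them safe.
static Address DecompressTaggedS390Model(Address root_register,
                                         Tagged_t slot_value) {
  Address zero_extended = static_cast<Address>(slot_value);  // llgf / llgfr
  return zero_extended + root_register;                      // agr
}
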
@@ -283,8 +283,8 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
   if (IsCompressedEmbeddedObject(rmode_)) {
     Tagged_t compressed = ReadUnalignedValue<Tagged_t>(pc_);
     DCHECK(!HAS_SMI_TAG(compressed));
-    Object obj(V8HeapCompressionScheme::DecompressTaggedPointer(cage_base,
-                                                                compressed));
+    Object obj(
+        V8HeapCompressionScheme::DecompressTagged(cage_base, compressed));
     // Embedding of compressed InstructionStream objects must not happen when
     // external code space is enabled, because Codes must be used
     // instead.
@@ -85,7 +85,7 @@ void MacroAssembler::LoadFromConstantsTable(Register destination,
                                             int constant_index) {
   DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
   LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
-  LoadTaggedPointerField(
+  LoadTaggedField(
       destination,
       FieldOperand(destination, FixedArray::OffsetOfElementAt(constant_index)));
 }
@@ -174,7 +174,7 @@ void MacroAssembler::LoadTaggedRoot(Register destination, RootIndex index) {

 void MacroAssembler::LoadRoot(Register destination, RootIndex index) {
   if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) {
-    DecompressTaggedPointer(destination, ReadOnlyRootPtr(index));
+    DecompressTagged(destination, ReadOnlyRootPtr(index));
     return;
   }
   DCHECK(root_array_available_);
@@ -220,24 +220,23 @@ void MacroAssembler::CompareRoot(Operand with, RootIndex index) {
 }

 void MacroAssembler::LoadMap(Register destination, Register object) {
-  LoadTaggedPointerField(destination,
-                         FieldOperand(object, HeapObject::kMapOffset));
+  LoadTaggedField(destination, FieldOperand(object, HeapObject::kMapOffset));
 #ifdef V8_MAP_PACKING
   UnpackMapWord(destination);
 #endif
 }

-void MacroAssembler::LoadTaggedPointerField(Register destination,
-                                            Operand field_operand) {
+void MacroAssembler::LoadTaggedField(Register destination,
+                                     Operand field_operand) {
   if (COMPRESS_POINTERS_BOOL) {
-    DecompressTaggedPointer(destination, field_operand);
+    DecompressTagged(destination, field_operand);
   } else {
     mov_tagged(destination, field_operand);
   }
 }

-void MacroAssembler::LoadTaggedPointerField(TaggedRegister destination,
-                                            Operand field_operand) {
+void MacroAssembler::LoadTaggedField(TaggedRegister destination,
+                                     Operand field_operand) {
   if (COMPRESS_POINTERS_BOOL) {
     movl(destination.reg(), field_operand);
   } else {
@@ -264,40 +263,10 @@ void MacroAssembler::LoadTaggedSignedField(Register destination,
   }
 }

-void MacroAssembler::LoadAnyTaggedField(Register destination,
-                                        Operand field_operand) {
-  if (COMPRESS_POINTERS_BOOL) {
-    DecompressAnyTagged(destination, field_operand);
-  } else {
-    mov_tagged(destination, field_operand);
-  }
-}
-
-void MacroAssembler::LoadAnyTaggedField(TaggedRegister destination,
-                                        Operand field_operand) {
-  if (COMPRESS_POINTERS_BOOL) {
-    movl(destination.reg(), field_operand);
-  } else {
-    mov_tagged(destination.reg(), field_operand);
-  }
-}
-
-void MacroAssembler::PushTaggedPointerField(Operand field_operand,
-                                            Register scratch) {
-  if (COMPRESS_POINTERS_BOOL) {
-    DCHECK(!field_operand.AddressUsesRegister(scratch));
-    DecompressTaggedPointer(scratch, field_operand);
-    Push(scratch);
-  } else {
-    Push(field_operand);
-  }
-}
-
-void MacroAssembler::PushTaggedAnyField(Operand field_operand,
-                                        Register scratch) {
+void MacroAssembler::PushTaggedField(Operand field_operand, Register scratch) {
   if (COMPRESS_POINTERS_BOOL) {
     DCHECK(!field_operand.AddressUsesRegister(scratch));
-    DecompressAnyTagged(scratch, field_operand);
+    DecompressTagged(scratch, field_operand);
     Push(scratch);
   } else {
     Push(field_operand);
@@ -357,29 +326,21 @@ void MacroAssembler::DecompressTaggedSigned(Register destination,
   movl(destination, field_operand);
 }

-void MacroAssembler::DecompressTaggedPointer(Register destination,
-                                             Operand field_operand) {
+void MacroAssembler::DecompressTagged(Register destination,
+                                      Operand field_operand) {
   ASM_CODE_COMMENT(this);
   movl(destination, field_operand);
   addq(destination, kPtrComprCageBaseRegister);
 }

-void MacroAssembler::DecompressTaggedPointer(Register destination,
-                                             Register source) {
+void MacroAssembler::DecompressTagged(Register destination, Register source) {
   ASM_CODE_COMMENT(this);
   movl(destination, source);
   addq(destination, kPtrComprCageBaseRegister);
 }

-void MacroAssembler::DecompressAnyTagged(Register destination,
-                                         Operand field_operand) {
-  ASM_CODE_COMMENT(this);
-  movl(destination, field_operand);
-  addq(destination, kPtrComprCageBaseRegister);
-}
-
-void MacroAssembler::DecompressTaggedPointer(Register destination,
-                                             Tagged_t immediate) {
+void MacroAssembler::DecompressTagged(Register destination,
+                                      Tagged_t immediate) {
   ASM_CODE_COMMENT(this);
   leaq(destination,
        Operand(kPtrComprCageBaseRegister, static_cast<int32_t>(immediate)));
@@ -951,7 +912,7 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(

   bind(&maybe_has_optimized_code);
   Register optimized_code_entry = flags;
-  LoadAnyTaggedField(
+  LoadTaggedField(
       optimized_code_entry,
       FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
   TailCallOptimizedCodeSlot(this, optimized_code_entry, closure, r9,
@@ -2803,7 +2764,7 @@ void MacroAssembler::InvokeFunction(Register function, Register new_target,
                                     Register actual_parameter_count,
                                     InvokeType type) {
   ASM_CODE_COMMENT(this);
-  LoadTaggedPointerField(
+  LoadTaggedField(
       rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
   movzxwq(rbx,
           FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -2816,8 +2777,7 @@ void MacroAssembler::InvokeFunction(Register function, Register new_target,
                                     Register actual_parameter_count,
                                     InvokeType type) {
   DCHECK_EQ(function, rdi);
-  LoadTaggedPointerField(rsi,
-                         FieldOperand(function, JSFunction::kContextOffset));
+  LoadTaggedField(rsi, FieldOperand(function, JSFunction::kContextOffset));
   InvokeFunctionCode(rdi, new_target, expected_parameter_count,
                      actual_parameter_count, type);
 }
@@ -2857,7 +2817,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
   // allow recompilation to take effect without changing any of the
   // call sites.
   static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
-  LoadTaggedPointerField(rcx, FieldOperand(function, JSFunction::kCodeOffset));
+  LoadTaggedField(rcx, FieldOperand(function, JSFunction::kCodeOffset));
   switch (type) {
     case InvokeType::kCall:
       CallCodeObject(rcx);
@@ -3227,11 +3187,11 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
   ASM_CODE_COMMENT(this);
   // Load native context.
   LoadMap(dst, rsi);
-  LoadTaggedPointerField(
+  LoadTaggedField(
       dst,
       FieldOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
   // Load value from native context.
-  LoadTaggedPointerField(dst, Operand(dst, Context::SlotOffset(index)));
+  LoadTaggedField(dst, Operand(dst, Context::SlotOffset(index)));
 }

 void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
@@ -3240,7 +3200,7 @@ void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                              Label* on_result,
                                              Label::Distance distance) {
   Label fallthrough;
-  LoadTaggedPointerField(
+  LoadTaggedField(
       scratch_and_result,
       FieldOperand(feedback_vector,
                    FeedbackVector::OffsetOfElementAt(slot.ToInt())));
@@ -3413,8 +3373,7 @@ void MacroAssembler::ComputeCodeStartAddress(Register dst) {
 // 3. if it is not zero then it jumps to the builtin.
 void MacroAssembler::BailoutIfDeoptimized(Register scratch) {
   int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
-  LoadTaggedPointerField(scratch,
-                         Operand(kJavaScriptCallCodeStartRegister, offset));
+  LoadTaggedField(scratch, Operand(kJavaScriptCallCodeStartRegister, offset));
   testl(FieldOperand(scratch, Code::kKindSpecificFlagsOffset),
         Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit));
   Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
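
The x64 immediate overload above folds the whole decompression into a single `leaq` off the cage base register. A sketch of the equivalent arithmetic, mirroring the `static_cast<int32_t>` sign-extension that `leaq` applies to its 32-bit displacement (stand-in types, not the V8 API):

#include <cstdint>

using Address = uintptr_t;
using Tagged_t = uint32_t;

// Illustrative model of `leaq dst, [cage_base + imm32]`: base plus the
// sign-extended compressed value, with no separate memory load.
static Address DecompressTaggedImmediateModel(Address cage_base,
                                              Tagged_t immediate) {
  return cage_base + static_cast<int32_t>(immediate);
}
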
@@ -583,35 +583,21 @@ class V8_EXPORT_PRIVATE MacroAssembler
   // ---------------------------------------------------------------------------
   // Pointer compression support

-  // Loads a field containing a HeapObject and decompresses it if pointer
-  // compression is enabled.
-  void LoadTaggedPointerField(Register destination, Operand field_operand);
+  // Loads a field containing any tagged value and decompresses it if necessary.
+  void LoadTaggedField(Register destination, Operand field_operand);

-  // Loads a field containing a HeapObject but does not decompress it when
+  // Loads a field containing any tagged value but does not decompress it when
   // pointer compression is enabled.
-  void LoadTaggedPointerField(TaggedRegister destination,
-                              Operand field_operand);
+  void LoadTaggedField(TaggedRegister destination, Operand field_operand);

   // Loads a field containing a Smi and decompresses it if pointer compression
   // is enabled.
   void LoadTaggedSignedField(Register destination, Operand field_operand);

-  // Loads a field containing any tagged value and decompresses it if necessary.
-  void LoadAnyTaggedField(Register destination, Operand field_operand);
-
-  // Loads a field containing any tagged value but does not decompress it when
-  // pointer compression is enabled.
-  void LoadAnyTaggedField(TaggedRegister destination, Operand field_operand);
-
-  // Loads a field containing a HeapObject, decompresses it if necessary and
-  // pushes full pointer to the stack. When pointer compression is enabled,
-  // uses |scratch| to decompress the value.
-  void PushTaggedPointerField(Operand field_operand, Register scratch);
-
   // Loads a field containing any tagged value, decompresses it if necessary and
   // pushes the full pointer to the stack. When pointer compression is enabled,
   // uses |scratch| to decompress the value.
-  void PushTaggedAnyField(Operand field_operand, Register scratch);
+  void PushTaggedField(Operand field_operand, Register scratch);

   // Loads a field containing smi value and untags it.
   void SmiUntagField(Register dst, Operand src);
@@ -626,10 +612,9 @@ class V8_EXPORT_PRIVATE MacroAssembler

   // The following macros work even when pointer compression is not enabled.
   void DecompressTaggedSigned(Register destination, Operand field_operand);
-  void DecompressTaggedPointer(Register destination, Operand field_operand);
-  void DecompressTaggedPointer(Register destination, Register source);
-  void DecompressTaggedPointer(Register destination, Tagged_t immediate);
-  void DecompressAnyTagged(Register destination, Operand field_operand);
+  void DecompressTagged(Register destination, Operand field_operand);
+  void DecompressTagged(Register destination, Register source);
+  void DecompressTagged(Register destination, Tagged_t immediate);

   // ---------------------------------------------------------------------------
   // V8 Sandbox support
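
The merged PushTaggedField declared above decompresses into a scratch register and pushes the full word when compression is on. A toy model of that behavior, assuming a downward-growing stack (illustrative only):

#include <cstdint>

// Illustrative model of PushTaggedField under compression: decompress the
// 32-bit field into a full pointer, then push it; returns the new stack top.
static uint64_t* PushTaggedFieldModel(uint64_t* stack_top, uint64_t cage_base,
                                      uint32_t field_value) {
  *--stack_top = cage_base + field_value;  // DecompressTagged + Push
  return stack_top;
}
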
@@ -69,8 +69,8 @@ Address V8HeapCompressionScheme::DecompressTaggedSigned(Tagged_t raw_value) {

 // static
 template <typename TOnHeapAddress>
-Address V8HeapCompressionScheme::DecompressTaggedPointer(
-    TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
+Address V8HeapCompressionScheme::DecompressTagged(TOnHeapAddress on_heap_addr,
+                                                  Tagged_t raw_value) {
 #if defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE) && \
     !defined(V8_COMPRESS_POINTERS_DONT_USE_GLOBAL_BASE)
   V8_ASSUME((base_ & kPtrComprCageBaseMask) == base_);
@@ -90,13 +90,6 @@ Address V8HeapCompressionScheme::DecompressTaggedPointer(
   return result;
 }

-// static
-template <typename TOnHeapAddress>
-Address V8HeapCompressionScheme::DecompressTaggedAny(
-    TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
-  return DecompressTaggedPointer(on_heap_addr, raw_value);
-}
-
 // static
 template <typename ProcessPointerCallback>
 void V8HeapCompressionScheme::ProcessIntermediatePointers(
@@ -105,10 +98,10 @@ void V8HeapCompressionScheme::ProcessIntermediatePointers(
   // If pointer compression is enabled, we may have random compressed pointers
   // on the stack that may be used for subsequent operations.
   // Extract, decompress and trace both halfwords.
-  Address decompressed_low = V8HeapCompressionScheme::DecompressTaggedPointer(
+  Address decompressed_low = V8HeapCompressionScheme::DecompressTagged(
       cage_base, static_cast<Tagged_t>(raw_value));
   callback(decompressed_low);
-  Address decompressed_high = V8HeapCompressionScheme::DecompressTaggedPointer(
+  Address decompressed_high = V8HeapCompressionScheme::DecompressTagged(
       cage_base,
       static_cast<Tagged_t>(raw_value >> (sizeof(Tagged_t) * CHAR_BIT)));
   callback(decompressed_high);
@@ -165,7 +158,7 @@ Address ExternalCodeCompressionScheme::DecompressTaggedSigned(

 // static
 template <typename TOnHeapAddress>
-Address ExternalCodeCompressionScheme::DecompressTaggedPointer(
+Address ExternalCodeCompressionScheme::DecompressTagged(
     TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
 #if defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE) && \
     !defined(V8_COMPRESS_POINTERS_DONT_USE_GLOBAL_BASE)
@@ -186,13 +179,6 @@ Address ExternalCodeCompressionScheme::DecompressTaggedPointer(
   return result;
 }

-// static
-template <typename TOnHeapAddress>
-Address ExternalCodeCompressionScheme::DecompressTaggedAny(
-    TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
-  return DecompressTaggedPointer(on_heap_addr, raw_value);
-}
-
 #endif  // V8_EXTERNAL_CODE_SPACE

 //
@@ -229,15 +215,8 @@ Address V8HeapCompressionScheme::DecompressTaggedSigned(Tagged_t raw_value) {

 // static
 template <typename TOnHeapAddress>
-Address V8HeapCompressionScheme::DecompressTaggedPointer(
-    TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
-  UNREACHABLE();
-}
-
-// static
-template <typename TOnHeapAddress>
-Address V8HeapCompressionScheme::DecompressTaggedAny(
-    TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
+Address V8HeapCompressionScheme::DecompressTagged(TOnHeapAddress on_heap_addr,
+                                                  Tagged_t raw_value) {
   UNREACHABLE();
 }

@@ -29,15 +29,10 @@ class V8HeapCompressionScheme {
   // Decompresses smi value.
   V8_INLINE static Address DecompressTaggedSigned(Tagged_t raw_value);

-  // Decompresses weak or strong heap object pointer or forwarding pointer,
-  // preserving both weak- and smi- tags.
-  template <typename TOnHeapAddress>
-  V8_INLINE static Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr,
-                                                   Tagged_t raw_value);
   // Decompresses any tagged value, preserving both weak- and smi- tags.
   template <typename TOnHeapAddress>
-  V8_INLINE static Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
-                                               Tagged_t raw_value);
+  V8_INLINE static Address DecompressTagged(TOnHeapAddress on_heap_addr,
+                                            Tagged_t raw_value);

   // Given a 64bit raw value, found on the stack, calls the callback function
   // with all possible pointers that may be "contained" in compressed form in
@@ -82,15 +77,10 @@ class ExternalCodeCompressionScheme {
   // Decompresses smi value.
   V8_INLINE static Address DecompressTaggedSigned(Tagged_t raw_value);

-  // Decompresses weak or strong heap object pointer or forwarding pointer,
-  // preserving both weak- and smi- tags.
-  template <typename TOnHeapAddress>
-  V8_INLINE static Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr,
-                                                   Tagged_t raw_value);
   // Decompresses any tagged value, preserving both weak- and smi- tags.
   template <typename TOnHeapAddress>
-  V8_INLINE static Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
-                                               Tagged_t raw_value);
+  V8_INLINE static Address DecompressTagged(TOnHeapAddress on_heap_addr,
+                                            Tagged_t raw_value);

 #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
   // Process-wide cage base value used for decompression.
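
At the scheme level, DecompressTagged is now the single entry point for everything that is not a known Smi, and conservative stack scanning runs it on both halves of each 64-bit word, as ProcessIntermediatePointers does above. A compact model under those assumptions (stand-in types, simplified cage-base handling):

#include <climits>
#include <cstdint>

using Address = uintptr_t;
using Tagged_t = uint32_t;

// Illustrative stand-in for the merged scheme entry point; the real template
// derives the cage base from an on-heap address.
static Address DecompressTaggedModel(Address cage_base, Tagged_t raw_value) {
  return cage_base + raw_value;
}

// Either half of a 64-bit stack word may hold a compressed pointer, so
// conservative scanning decompresses and reports both halves.
static void ProcessIntermediatePointersModel(Address cage_base,
                                             uint64_t raw_word,
                                             void (*callback)(Address)) {
  callback(DecompressTaggedModel(cage_base, static_cast<Tagged_t>(raw_word)));
  callback(DecompressTaggedModel(
      cage_base,
      static_cast<Tagged_t>(raw_word >> (sizeof(Tagged_t) * CHAR_BIT))));
}
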
@@ -284,7 +284,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {

   void Generate() final {
     if (COMPRESS_POINTERS_BOOL) {
-      __ DecompressTaggedPointer(value_, value_);
+      __ DecompressTagged(value_, value_);
     }
     __ CheckPageFlag(
         value_, MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
@@ -752,14 +752,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         // Check the function's context matches the context argument.
         UseScratchRegisterScope scope(masm());
         Register temp = scope.AcquireX();
-        __ LoadTaggedPointerField(
-            temp, FieldMemOperand(func, JSFunction::kContextOffset));
+        __ LoadTaggedField(temp,
+                           FieldMemOperand(func, JSFunction::kContextOffset));
         __ cmp(cp, temp);
         __ Assert(eq, AbortReason::kWrongFunctionContext);
       }
       static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
-      __ LoadTaggedPointerField(x2,
-                                FieldMemOperand(func, JSFunction::kCodeOffset));
+      __ LoadTaggedField(x2, FieldMemOperand(func, JSFunction::kCodeOffset));
       __ CallCodeObject(x2);
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();
@@ -1890,23 +1889,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kArm64LdrDecompressTaggedSigned:
       __ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
       break;
-    case kArm64LdrDecompressTaggedPointer:
-      __ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
-      break;
-    case kArm64LdrDecompressAnyTagged:
-      __ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
+    case kArm64LdrDecompressTagged:
+      __ DecompressTagged(i.OutputRegister(), i.MemoryOperand());
       break;
     case kArm64LdarDecompressTaggedSigned:
       __ AtomicDecompressTaggedSigned(i.OutputRegister(), i.InputRegister(0),
                                       i.InputRegister(1), i.TempRegister(0));
       break;
-    case kArm64LdarDecompressTaggedPointer:
-      __ AtomicDecompressTaggedPointer(i.OutputRegister(), i.InputRegister(0),
-                                       i.InputRegister(1), i.TempRegister(0));
-      break;
-    case kArm64LdarDecompressAnyTagged:
-      __ AtomicDecompressAnyTagged(i.OutputRegister(), i.InputRegister(0),
-                                   i.InputRegister(1), i.TempRegister(0));
+    case kArm64LdarDecompressTagged:
+      __ AtomicDecompressTagged(i.OutputRegister(), i.InputRegister(0),
+                                i.InputRegister(1), i.TempRegister(0));
       break;
     case kArm64LdrDecodeSandboxedPointer:
       __ LoadSandboxedPointerField(i.OutputRegister(), i.MemoryOperand());
@@ -199,11 +199,9 @@ namespace compiler {
   V(Arm64Float64MoveU64)              \
   V(Arm64U64MoveFloat64)              \
   V(Arm64LdrDecompressTaggedSigned)   \
-  V(Arm64LdrDecompressTaggedPointer)  \
-  V(Arm64LdrDecompressAnyTagged)      \
+  V(Arm64LdrDecompressTagged)         \
   V(Arm64LdarDecompressTaggedSigned)  \
-  V(Arm64LdarDecompressTaggedPointer) \
-  V(Arm64LdarDecompressAnyTagged)     \
+  V(Arm64LdarDecompressTagged)        \
   V(Arm64StrCompressTagged)           \
   V(Arm64StlrCompressTagged)          \
   V(Arm64LdrDecodeSandboxedPointer)   \
@@ -315,11 +315,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArm64LdrW:
     case kArm64Ldr:
     case kArm64LdrDecompressTaggedSigned:
-    case kArm64LdrDecompressTaggedPointer:
-    case kArm64LdrDecompressAnyTagged:
+    case kArm64LdrDecompressTagged:
     case kArm64LdarDecompressTaggedSigned:
-    case kArm64LdarDecompressTaggedPointer:
-    case kArm64LdarDecompressAnyTagged:
+    case kArm64LdarDecompressTagged:
     case kArm64LdrDecodeSandboxedPointer:
     case kArm64Peek:
    case kArm64LoadSplat:
@@ -431,8 +429,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
       return 1;

     case kArm64LdrDecompressTaggedSigned:
-    case kArm64LdrDecompressTaggedPointer:
-    case kArm64LdrDecompressAnyTagged:
+    case kArm64LdrDecompressTagged:
     case kArm64Ldr:
     case kArm64LdrD:
     case kArm64LdrS:
@@ -843,11 +843,8 @@ void InstructionSelector::VisitLoad(Node* node) {
       immediate_mode = kLoadStoreImm32;
       break;
     case MachineRepresentation::kTaggedPointer:
-      opcode = kArm64LdrDecompressTaggedPointer;
-      immediate_mode = kLoadStoreImm32;
-      break;
     case MachineRepresentation::kTagged:
-      opcode = kArm64LdrDecompressAnyTagged;
+      opcode = kArm64LdrDecompressTagged;
       immediate_mode = kLoadStoreImm32;
       break;
 #else
@@ -2773,10 +2770,10 @@ void VisitAtomicLoad(InstructionSelector* selector, Node* node,
       code = kArm64LdarDecompressTaggedSigned;
       break;
     case MachineRepresentation::kTaggedPointer:
-      code = kArm64LdarDecompressTaggedPointer;
+      code = kArm64LdarDecompressTagged;
       break;
     case MachineRepresentation::kTagged:
-      code = kArm64LdarDecompressAnyTagged;
+      code = kArm64LdarDecompressTagged;
       break;
 #else
     case MachineRepresentation::kTaggedSigned:  // Fall through.
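
In the Arm64 backend the change is purely a many-to-one opcode mapping: kTaggedPointer and kTagged loads now select the same decompression instruction. A hedged sketch of that selector shape, with invented enum names standing in for the real ones:

#include <cstdint>

// Invented names, illustrative of the mapping only.
enum ArchOpcode : uint8_t {
  kLdrDecompressTaggedSigned,
  kLdrDecompressTagged,  // formerly separate TaggedPointer/AnyTagged opcodes
};

enum class MachineRep : uint8_t { kTaggedSigned, kTaggedPointer, kTagged };

// Both non-Smi tagged representations now select one decompressing load.
static ArchOpcode SelectLoadOpcode(MachineRep rep) {
  switch (rep) {
    case MachineRep::kTaggedSigned:
      return kLdrDecompressTaggedSigned;
    case MachineRep::kTaggedPointer:
    case MachineRep::kTagged:
      return kLdrDecompressTagged;
  }
  return kLdrDecompressTagged;  // unreachable with a well-formed rep
}
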
@ -172,7 +172,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
|
|||||||
void Generate() final {
|
void Generate() final {
|
||||||
ConstantPoolUnavailableScope constant_pool_unavailable(masm());
|
ConstantPoolUnavailableScope constant_pool_unavailable(masm());
|
||||||
if (COMPRESS_POINTERS_BOOL) {
|
if (COMPRESS_POINTERS_BOOL) {
|
||||||
__ DecompressTaggedPointer(value_, value_);
|
__ DecompressTagged(value_, value_);
|
||||||
}
|
}
|
||||||
__ CheckPageFlag(
|
__ CheckPageFlag(
|
||||||
value_, scratch0_,
|
value_, scratch0_,
|
||||||
@ -793,8 +793,8 @@ void CodeGenerator::BailoutIfDeoptimized() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
|
int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(r11, MemOperand(kJavaScriptCallCodeStartRegister, offset),
|
||||||
r11, MemOperand(kJavaScriptCallCodeStartRegister, offset), r0);
|
r0);
|
||||||
__ LoadS32(r11, FieldMemOperand(r11, Code::kKindSpecificFlagsOffset), r0);
|
__ LoadS32(r11, FieldMemOperand(r11, Code::kKindSpecificFlagsOffset), r0);
|
||||||
__ TestBit(r11, InstructionStream::kMarkedForDeoptimizationBit);
|
__ TestBit(r11, InstructionStream::kMarkedForDeoptimizationBit);
|
||||||
__ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
|
__ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
|
||||||
@ -908,14 +908,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
|||||||
Register func = i.InputRegister(0);
|
Register func = i.InputRegister(0);
|
||||||
if (v8_flags.debug_code) {
|
if (v8_flags.debug_code) {
|
||||||
// Check the function's context matches the context argument.
|
// Check the function's context matches the context argument.
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset), r0);
|
kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset), r0);
|
||||||
__ CmpS64(cp, kScratchReg);
|
__ CmpS64(cp, kScratchReg);
|
||||||
__ Assert(eq, AbortReason::kWrongFunctionContext);
|
__ Assert(eq, AbortReason::kWrongFunctionContext);
|
||||||
}
|
}
|
||||||
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
|
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(r5, FieldMemOperand(func, JSFunction::kCodeOffset),
|
||||||
r5, FieldMemOperand(func, JSFunction::kCodeOffset), r0);
|
r0);
|
||||||
__ CallCodeObject(r5);
|
__ CallCodeObject(r5);
|
||||||
RecordCallPosition(instr);
|
RecordCallPosition(instr);
|
||||||
DCHECK_EQ(LeaveRC, i.OutputRCBit());
|
DCHECK_EQ(LeaveRC, i.OutputRCBit());
|
||||||
@ -2880,13 +2880,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
|||||||
ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false);
|
ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case kPPC_LoadDecompressTaggedPointer: {
|
case kPPC_LoadDecompressTagged: {
|
||||||
CHECK(instr->HasOutput());
|
|
||||||
ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false);
|
|
||||||
__ add(i.OutputRegister(), i.OutputRegister(), kPtrComprCageBaseRegister);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
case kPPC_LoadDecompressAnyTagged: {
|
|
||||||
CHECK(instr->HasOutput());
|
CHECK(instr->HasOutput());
|
||||||
ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false);
|
ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false);
|
||||||
__ add(i.OutputRegister(), i.OutputRegister(), kPtrComprCageBaseRegister);
|
__ add(i.OutputRegister(), i.OutputRegister(), kPtrComprCageBaseRegister);
|
||||||
|
@@ -411,8 +411,7 @@ namespace compiler {
   V(PPC_S128Store64Lane)              \
   V(PPC_StoreCompressTagged)          \
   V(PPC_LoadDecompressTaggedSigned)   \
-  V(PPC_LoadDecompressTaggedPointer)  \
-  V(PPC_LoadDecompressAnyTagged)
+  V(PPC_LoadDecompressTagged)

 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
@@ -331,8 +331,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kPPC_LoadSimd128:
     case kPPC_Peek:
     case kPPC_LoadDecompressTaggedSigned:
-    case kPPC_LoadDecompressTaggedPointer:
-    case kPPC_LoadDecompressAnyTagged:
+    case kPPC_LoadDecompressTagged:
     case kPPC_S128Load8Splat:
     case kPPC_S128Load16Splat:
     case kPPC_S128Load32Splat:
@@ -214,10 +214,10 @@ static void VisitLoadCommon(InstructionSelector* selector, Node* node,
       opcode = kPPC_LoadDecompressTaggedSigned;
       break;
     case MachineRepresentation::kTaggedPointer:
-      opcode = kPPC_LoadDecompressTaggedPointer;
+      opcode = kPPC_LoadDecompressTagged;
       break;
     case MachineRepresentation::kTagged:
-      opcode = kPPC_LoadDecompressAnyTagged;
+      opcode = kPPC_LoadDecompressTagged;
       break;
 #else
     case MachineRepresentation::kTaggedSigned:  // Fall through.
@@ -173,7 +173,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
   void Generate() final {
 #if V8_TARGET_ARCH_RISCV64
     if (COMPRESS_POINTERS_BOOL) {
-      __ DecompressTaggedPointer(value_, value_);
+      __ DecompressTagged(value_, value_);
     }
 #endif
     __ CheckPageFlag(
@@ -628,8 +628,8 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
 // 3. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
   int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
-  __ LoadTaggedPointerField(
-      kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
+  __ LoadTaggedField(kScratchReg,
+                     MemOperand(kJavaScriptCallCodeStartRegister, offset));
   __ Lw(kScratchReg,
         FieldMemOperand(kScratchReg, Code::kKindSpecificFlagsOffset));
   __ And(kScratchReg, kScratchReg,
@@ -722,14 +722,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       Register func = i.InputOrZeroRegister(0);
       if (v8_flags.debug_code) {
         // Check the function's context matches the context argument.
-        __ LoadTaggedPointerField(
-            kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+        __ LoadTaggedField(kScratchReg,
+                           FieldMemOperand(func, JSFunction::kContextOffset));
         __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
                   Operand(kScratchReg));
       }
       static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
-      __ LoadTaggedPointerField(a2,
-                                FieldMemOperand(func, JSFunction::kCodeOffset));
+      __ LoadTaggedField(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
       __ CallCodeObject(a2);
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();
@@ -2194,18 +2193,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ DecompressTaggedSigned(result, operand);
       break;
     }
-    case kRiscvLoadDecompressTaggedPointer: {
+    case kRiscvLoadDecompressTagged: {
       CHECK(instr->HasOutput());
       Register result = i.OutputRegister();
       MemOperand operand = i.MemoryOperand();
-      __ DecompressTaggedPointer(result, operand);
-      break;
-    }
-    case kRiscvLoadDecompressAnyTagged: {
-      CHECK(instr->HasOutput());
-      Register result = i.OutputRegister();
-      MemOperand operand = i.MemoryOperand();
-      __ DecompressAnyTagged(result, operand);
+      __ DecompressTagged(result, operand);
       break;
     }
 #endif
@@ -64,8 +64,7 @@ namespace compiler {
   V(RiscvWord64AtomicExchangeUint64)        \
   V(RiscvStoreCompressTagged)               \
   V(RiscvLoadDecompressTaggedSigned)        \
-  V(RiscvLoadDecompressTaggedPointer)       \
-  V(RiscvLoadDecompressAnyTagged)           \
+  V(RiscvLoadDecompressTagged)              \
   V(RiscvWord64AtomicCompareExchangeUint64)
 #elif V8_TARGET_ARCH_RISCV32
 #define TARGET_ARCH_OPCODE_LIST_SPECAIL(V) \
@@ -377,8 +377,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kRiscvUlwu:
     case kRiscvWord64AtomicLoadUint64:
     case kRiscvLoadDecompressTaggedSigned:
-    case kRiscvLoadDecompressTaggedPointer:
-    case kRiscvLoadDecompressAnyTagged:
+    case kRiscvLoadDecompressTagged:
 #elif V8_TARGET_ARCH_RISCV32
     case kRiscvWord32AtomicPairLoad:
 #endif
@@ -280,10 +280,10 @@ void InstructionSelector::VisitLoad(Node* node) {
       opcode = kRiscvLoadDecompressTaggedSigned;
       break;
     case MachineRepresentation::kTaggedPointer:
-      opcode = kRiscvLoadDecompressTaggedPointer;
+      opcode = kRiscvLoadDecompressTagged;
       break;
     case MachineRepresentation::kTagged:
-      opcode = kRiscvLoadDecompressAnyTagged;
+      opcode = kRiscvLoadDecompressTagged;
       break;
 #else
     case MachineRepresentation::kTaggedSigned:  // Fall through.
@@ -1938,10 +1938,10 @@ void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
       opcode = kRiscv64LdDecompressTaggedSigned;
       break;
     case MachineRepresentation::kTaggedPointer:
-      opcode = kRiscv64LdDecompressTaggedPointer;
+      opcode = kRiscv64LdDecompressTagged;
       break;
     case MachineRepresentation::kTagged:
-      opcode = kRiscv64LdDecompressAnyTagged;
+      opcode = kRiscv64LdDecompressTagged;
       break;
 #else
     case MachineRepresentation::kTaggedSigned:  // Fall through.
@@ -209,7 +209,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {

   void Generate() final {
     if (COMPRESS_POINTERS_BOOL) {
-      __ DecompressTaggedPointer(value_, value_);
+      __ DecompressTagged(value_, value_);
     }
     __ CheckPageFlag(
         value_, scratch0_,
@@ -1134,8 +1134,8 @@ void CodeGenerator::BailoutIfDeoptimized() {
   }

   int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
-  __ LoadTaggedPointerField(
-      ip, MemOperand(kJavaScriptCallCodeStartRegister, offset), r0);
+  __ LoadTaggedField(ip, MemOperand(kJavaScriptCallCodeStartRegister, offset),
+                     r0);
   __ LoadS32(ip, FieldMemOperand(ip, Code::kKindSpecificFlagsOffset));
   __ TestBit(ip, InstructionStream::kMarkedForDeoptimizationBit);
   __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
@@ -1240,14 +1240,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       Register func = i.InputRegister(0);
       if (v8_flags.debug_code) {
         // Check the function's context matches the context argument.
-        __ LoadTaggedPointerField(
-            kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+        __ LoadTaggedField(kScratchReg,
+                           FieldMemOperand(func, JSFunction::kContextOffset));
         __ CmpS64(cp, kScratchReg);
         __ Assert(eq, AbortReason::kWrongFunctionContext);
       }
       static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
-      __ LoadTaggedPointerField(r4,
-                                FieldMemOperand(func, JSFunction::kCodeOffset));
+      __ LoadTaggedField(r4, FieldMemOperand(func, JSFunction::kCodeOffset));
       __ CallCodeObject(r4);
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();
@@ -3175,14 +3174,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
       break;
     }
-    case kS390_LoadDecompressTaggedPointer: {
+    case kS390_LoadDecompressTagged: {
       CHECK(instr->HasOutput());
-      __ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
-      break;
-    }
-    case kS390_LoadDecompressAnyTagged: {
-      CHECK(instr->HasOutput());
-      __ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
+      __ DecompressTagged(i.OutputRegister(), i.MemoryOperand());
       break;
     }
     default:
@@ -398,8 +398,7 @@ namespace compiler {
   V(S390_LoadSimd128)                 \
   V(S390_StoreCompressTagged)         \
   V(S390_LoadDecompressTaggedSigned)  \
-  V(S390_LoadDecompressTaggedPointer) \
-  V(S390_LoadDecompressAnyTagged)
+  V(S390_LoadDecompressTagged)

 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
@@ -358,8 +358,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kS390_LoadReverseSimd128:
     case kS390_Peek:
     case kS390_LoadDecompressTaggedSigned:
-    case kS390_LoadDecompressTaggedPointer:
-    case kS390_LoadDecompressAnyTagged:
+    case kS390_LoadDecompressTagged:
     case kS390_S128Load8Splat:
     case kS390_S128Load16Splat:
     case kS390_S128Load32Splat:
@@ -303,10 +303,10 @@ ArchOpcode SelectLoadOpcode(LoadRepresentation load_rep) {
       opcode = kS390_LoadDecompressTaggedSigned;
       break;
     case MachineRepresentation::kTaggedPointer:
-      opcode = kS390_LoadDecompressTaggedPointer;
+      opcode = kS390_LoadDecompressTagged;
       break;
     case MachineRepresentation::kTagged:
-      opcode = kS390_LoadDecompressAnyTagged;
+      opcode = kS390_LoadDecompressTagged;
       break;
 #else
     case MachineRepresentation::kTaggedSigned:  // Fall through.
@@ -293,7 +293,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {

   void Generate() final {
     if (COMPRESS_POINTERS_BOOL) {
-      __ DecompressTaggedPointer(value_, value_);
+      __ DecompressTagged(value_, value_);
     }
     __ CheckPageFlag(
         value_, scratch0_,
@@ -1358,8 +1358,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         __ Assert(equal, AbortReason::kWrongFunctionContext);
       }
       static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
-      __ LoadTaggedPointerField(rcx,
-                                FieldOperand(func, JSFunction::kCodeOffset));
+      __ LoadTaggedField(rcx, FieldOperand(func, JSFunction::kCodeOffset));
       __ CallCodeObject(rcx);
       frame_access_state()->ClearSPDelta();
       RecordCallPosition(instr);
@@ -2576,18 +2575,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
                                      DetermineStubCallMode(), kTaggedSize);
       break;
     }
-    case kX64MovqDecompressTaggedPointer: {
+    case kX64MovqDecompressTagged: {
       CHECK(instr->HasOutput());
       Operand address(i.MemoryOperand());
-      __ DecompressTaggedPointer(i.OutputRegister(), address);
-      EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i,
-                                     DetermineStubCallMode(), kTaggedSize);
-      break;
-    }
-    case kX64MovqDecompressAnyTagged: {
-      CHECK(instr->HasOutput());
-      Operand address(i.MemoryOperand());
-      __ DecompressAnyTagged(i.OutputRegister(), address);
+      __ DecompressTagged(i.OutputRegister(), address);
       EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i,
                                      DetermineStubCallMode(), kTaggedSize);
       break;
@@ -173,8 +173,7 @@ namespace compiler {
   V(X64Float32Abs)                  \
   V(X64Float32Neg)                  \
   V(X64MovqDecompressTaggedSigned)  \
-  V(X64MovqDecompressTaggedPointer) \
-  V(X64MovqDecompressAnyTagged)     \
+  V(X64MovqDecompressTagged)        \
   V(X64MovqCompressTagged)          \
   V(X64MovqEncodeSandboxedPointer)  \
   V(X64MovqDecodeSandboxedPointer)  \
@@ -403,8 +403,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     }

     case kX64MovqDecompressTaggedSigned:
-    case kX64MovqDecompressTaggedPointer:
-    case kX64MovqDecompressAnyTagged:
+    case kX64MovqDecompressTagged:
     case kX64MovqCompressTagged:
     case kX64MovqDecodeSandboxedPointer:
     case kX64MovqEncodeSandboxedPointer:
@@ -318,10 +318,8 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
      opcode = kX64MovqDecompressTaggedSigned;
       break;
     case MachineRepresentation::kTaggedPointer:
-      opcode = kX64MovqDecompressTaggedPointer;
-      break;
     case MachineRepresentation::kTagged:
-      opcode = kX64MovqDecompressAnyTagged;
+      opcode = kX64MovqDecompressTagged;
       break;
 #else
     case MachineRepresentation::kTaggedSigned:  // Fall through.
@@ -1428,7 +1428,7 @@ int TranslatedState::CreateNextTranslatedValue(

 Address TranslatedState::DecompressIfNeeded(intptr_t value) {
   if (COMPRESS_POINTERS_BOOL) {
-    return V8HeapCompressionScheme::DecompressTaggedAny(
+    return V8HeapCompressionScheme::DecompressTagged(
         isolate(), static_cast<uint32_t>(value));
   } else {
     return value;
@@ -1973,8 +1973,7 @@ void WasmStruct::WasmStructPrint(std::ostream& os) {
     case wasm::kRtt: {
       Tagged_t raw = base::ReadUnalignedValue<Tagged_t>(field_address);
 #if V8_COMPRESS_POINTERS
-      Address obj =
-          V8HeapCompressionScheme::DecompressTaggedPointer(address(), raw);
+      Address obj = V8HeapCompressionScheme::DecompressTagged(address(), raw);
 #else
       Address obj = raw;
 #endif
@@ -3000,7 +2999,7 @@ inline i::Object GetObjectFromRaw(void* object) {
   if (RoundDown<i::kPtrComprCageBaseAlignment>(object_ptr) == i::kNullAddress) {
     // Try to decompress pointer.
     i::Isolate* isolate = i::Isolate::Current();
-    object_ptr = i::V8HeapCompressionScheme::DecompressTaggedAny(
+    object_ptr = i::V8HeapCompressionScheme::DecompressTagged(
         isolate, static_cast<i::Tagged_t>(object_ptr));
   }
 #endif
@@ -1109,7 +1109,7 @@ void VisitSpillSlot(Isolate* isolate, RootVisitor* v,
   if (!HAS_SMI_TAG(value) && value <= 0xffffffff) {
     // We don't need to update smi values or full pointers.
     was_compressed = true;
-    *spill_slot.location() = V8HeapCompressionScheme::DecompressTaggedPointer(
+    *spill_slot.location() = V8HeapCompressionScheme::DecompressTagged(
         cage_base, static_cast<Tagged_t>(value));
     if (DEBUG_BOOL) {
       // Ensure that the spill slot contains correct heap object.
@@ -1144,7 +1144,7 @@ void VisitSpillSlot(Isolate* isolate, RootVisitor* v,
     if (!HAS_SMI_TAG(compressed_value)) {
       was_compressed = slot_contents <= 0xFFFFFFFF;
       // We don't need to update smi values.
-      *spill_slot.location() = V8HeapCompressionScheme::DecompressTaggedPointer(
+      *spill_slot.location() = V8HeapCompressionScheme::DecompressTagged(
           cage_base, compressed_value);
     }
   }
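(Both spill-slot hunks above rely on the same invariant: a compressed tagged value occupies only the low 32 bits of the slot. A self-contained sketch of that filter follows; the helper name and the inlined Smi-tag test are simplifying assumptions, not V8's macros.)

    #include <cstdint>

    // A 64-bit spill slot may hold a full pointer, a Smi, or a compressed
    // tagged value. Only the last needs decompression: it carries the
    // heap-object tag (low bit set; V8 Smis have a 0 tag bit) and fits in
    // 32 bits, so anything with a non-zero upper half is left alone.
    bool NeedsDecompression(uint64_t slot_contents) {
      bool has_smi_tag = (slot_contents & 1) == 0;
      return !has_smi_tag && slot_contents <= 0xffffffffu;
    }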
@@ -133,7 +133,7 @@ Object Isolate::VerifyBuiltinsResult(Object result) {
   // because that's the assumption in generated code (which might call this
   // builtin).
   if (!result.IsSmi()) {
-    DCHECK_EQ(result.ptr(), V8HeapCompressionScheme::DecompressTaggedPointer(
+    DCHECK_EQ(result.ptr(), V8HeapCompressionScheme::DecompressTagged(
                                 this, static_cast<Tagged_t>(result.ptr())));
   }
 #endif
@@ -149,11 +149,11 @@ ObjectPair Isolate::VerifyBuiltinsResult(ObjectPair pair) {
   // because that's the assumption in generated code (which might call this
   // builtin).
   if (!HAS_SMI_TAG(pair.x)) {
-    DCHECK_EQ(pair.x, V8HeapCompressionScheme::DecompressTaggedPointer(
+    DCHECK_EQ(pair.x, V8HeapCompressionScheme::DecompressTagged(
                           this, static_cast<Tagged_t>(pair.x)));
   }
   if (!HAS_SMI_TAG(pair.y)) {
-    DCHECK_EQ(pair.y, V8HeapCompressionScheme::DecompressTaggedPointer(
+    DCHECK_EQ(pair.y, V8HeapCompressionScheme::DecompressTagged(
                           this, static_cast<Tagged_t>(pair.y)));
   }
 #endif  // V8_COMPRESS_POINTERS
@@ -4140,7 +4140,7 @@ void Isolate::VerifyStaticRoots() {
   for (Tagged_t cmp_ptr : StaticReadOnlyRootsPointerTable) {
     Address the_root = roots[idx];
     Address ptr =
-        V8HeapCompressionScheme::DecompressTaggedPointer(cage_base(), cmp_ptr);
+        V8HeapCompressionScheme::DecompressTagged(cage_base(), cmp_ptr);
     CHECK_WITH_MSG(the_root == ptr, STATIC_ROOTS_FAILED_MSG);
     // All roots must fit on first page, since only this page is guaranteed to
     // have a stable offset from the cage base. If this ever changes we need
@@ -4378,9 +4378,9 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
     Address base = code_cage->base();
     Address last = base + code_cage->size() - 1;
     PtrComprCageBase code_cage_base{code_cage_base_};
-    CHECK_EQ(base, ComprScheme::DecompressTaggedPointer(
+    CHECK_EQ(base, ComprScheme::DecompressTagged(
                        code_cage_base, ComprScheme::CompressTagged(base)));
-    CHECK_EQ(last, ComprScheme::DecompressTaggedPointer(
+    CHECK_EQ(last, ComprScheme::DecompressTagged(
                        code_cage_base, ComprScheme::CompressTagged(last)));
   }
 #endif  // V8_EXTERNAL_CODE_SPACE
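(The CHECK_EQs above are roundtrip assertions: compression truncates an address to 32 bits, decompression re-adds the cage base, and the composition must be the identity for any address inside the cage. A hedged sketch of the invariant follows; the helper names are assumptions, and it only holds when the cage base is aligned to the 4GB cage size.)

    #include <cassert>
    #include <cstdint>

    using Address = uintptr_t;
    using Tagged_t = uint32_t;

    Tagged_t CompressTagged(Address ptr) { return static_cast<Tagged_t>(ptr); }
    Address DecompressTagged(Address base, Tagged_t v) { return base + v; }

    // Holds whenever cage_base is 4GB-aligned and ptr lies in
    // [cage_base, cage_base + 4GB): ptr's upper half equals cage_base's.
    void CheckRoundtrip(Address cage_base, Address ptr) {
      assert(DecompressTagged(cage_base, CompressTagged(ptr)) == ptr);
    }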
@@ -38,7 +38,7 @@ SlotCallbackResult UpdateTypedSlotHelper::UpdateTypedSlot(Heap* heap,
     }
     case SlotType::kConstPoolEmbeddedObjectCompressed: {
       HeapObject old_target =
-          HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny(
+          HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTagged(
               heap->isolate(), base::Memory<Tagged_t>(addr))));
       HeapObject new_target = old_target;
       SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
@@ -80,7 +80,7 @@ HeapObject UpdateTypedSlotHelper::GetTargetObject(Heap* heap,
       return rinfo.target_object(heap->isolate());
     }
     case SlotType::kConstPoolEmbeddedObjectCompressed: {
-      Address full = V8HeapCompressionScheme::DecompressTaggedAny(
+      Address full = V8HeapCompressionScheme::DecompressTagged(
           heap->isolate(), base::Memory<Tagged_t>(addr));
       return HeapObject::cast(Object(full));
     }
@@ -483,10 +483,10 @@ inline void MaglevAssembler::DeoptIfBufferDetached(Register array,
           ->DependOnArrayBufferDetachingProtector()) {
     // A detached buffer leads to megamorphic feedback, so we won't have a deopt
     // loop if we deopt here.
-    LoadTaggedPointerField(
-        scratch, FieldMemOperand(array, JSArrayBufferView::kBufferOffset));
-    LoadTaggedPointerField(
-        scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
+    LoadTaggedField(scratch,
+                    FieldMemOperand(array, JSArrayBufferView::kBufferOffset));
+    LoadTaggedField(scratch,
+                    FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
     Tst(scratch.W(), Immediate(JSArrayBuffer::WasDetachedBit::kMask));
     EmitEagerDeoptIf(ne, DeoptimizeReason::kArrayBufferWasDetached, node);
   }
@@ -488,7 +488,7 @@ void MaglevAssembler::LoadSingleCharacterString(Register result,
   Register table = scratch;
   LoadRoot(table, RootIndex::kSingleCharacterStringTable);
   Add(table, table, Operand(char_code, LSL, kTaggedSizeLog2));
-  DecompressAnyTagged(result, FieldMemOperand(table, FixedArray::kHeaderSize));
+  DecompressTagged(result, FieldMemOperand(table, FixedArray::kHeaderSize));
 }

 void MaglevAssembler::StringFromCharCode(RegisterSnapshot register_snapshot,
@@ -604,8 +604,8 @@ void MaglevAssembler::StringCharCodeAt(RegisterSnapshot& register_snapshot,

     // Is a thin string.
     {
-      DecompressAnyTagged(string,
-                          FieldMemOperand(string, ThinString::kActualOffset));
+      DecompressTagged(string,
+                       FieldMemOperand(string, ThinString::kActualOffset));
       B(&loop);
     }

@@ -616,8 +616,8 @@ void MaglevAssembler::StringCharCodeAt(RegisterSnapshot& register_snapshot,

      Ldr(offset.W(), FieldMemOperand(string, SlicedString::kOffsetOffset));
      SmiUntag(offset);
-     DecompressAnyTagged(string,
-                         FieldMemOperand(string, SlicedString::kParentOffset));
+     DecompressTagged(string,
+                      FieldMemOperand(string, SlicedString::kParentOffset));
      Add(index, index, offset);
      B(&loop);
    }
@@ -630,8 +630,7 @@ void MaglevAssembler::StringCharCodeAt(RegisterSnapshot& register_snapshot,
     Ldr(second_string.W(), FieldMemOperand(string, ConsString::kSecondOffset));
     CompareRoot(second_string, RootIndex::kempty_string);
     B(deferred_runtime_call, ne);
-    DecompressAnyTagged(string,
-                        FieldMemOperand(string, ConsString::kFirstOffset));
+    DecompressTagged(string, FieldMemOperand(string, ConsString::kFirstOffset));
     B(&loop);  // Try again with first string.
   }

@@ -92,8 +92,8 @@ void CheckJSObjectElementsBounds::GenerateCode(MaglevAssembler* masm,
     __ CompareObjectType(object, FIRST_JS_OBJECT_TYPE, scratch);
     __ Assert(ge, AbortReason::kUnexpectedValue);
   }
-  __ LoadAnyTaggedField(scratch,
+  __ LoadTaggedField(scratch,
                      FieldMemOperand(object, JSObject::kElementsOffset));
   if (v8_flags.debug_code) {
     __ AssertNotSmi(scratch);
   }
@@ -1325,8 +1325,8 @@ void CheckedInternalizedString::GenerateCode(MaglevAssembler* masm,
   // Deopt if this isn't a thin string.
   __ Tst(instance_type.W(), Immediate(kThinStringTagBit));
   __ EmitEagerDeoptIf(eq, DeoptimizeReason::kWrongMap, node);
-  __ LoadTaggedPointerField(
-      object, FieldMemOperand(object, ThinString::kActualOffset));
+  __ LoadTaggedField(object,
+                     FieldMemOperand(object, ThinString::kActualOffset));
   if (v8_flags.debug_code) {
     __ RecordComment("DCHECK IsInternalizedString");
     Register scratch = instance_type;
@@ -1408,7 +1408,7 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
   Register generator = ToRegister(generator_input());
   Register array = WriteBarrierDescriptor::ObjectRegister();
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       array, FieldMemOperand(generator,
                              JSGeneratorObject::kParametersAndRegistersOffset));

@@ -1524,7 +1524,7 @@ void IncreaseInterruptBudget::GenerateCode(MaglevAssembler* masm,
   Register budget = temps.Acquire().W();
   __ Ldr(feedback_cell,
          MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       feedback_cell,
       FieldMemOperand(feedback_cell, JSFunction::kFeedbackCellOffset));
   __ Ldr(budget,
@@ -1647,7 +1647,7 @@ void HandleInterruptsAndTiering(MaglevAssembler* masm, ZoneLabelRef done,
   Register budget = temps.Acquire().W();
   __ Ldr(feedback_cell,
          MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       feedback_cell,
       FieldMemOperand(feedback_cell, JSFunction::kFeedbackCellOffset));
   __ Move(budget, v8_flags.interrupt_budget);
@@ -1672,7 +1672,7 @@ void ReduceInterruptBudget::GenerateCode(MaglevAssembler* masm,
   Register budget = temps.Acquire().W();
   __ Ldr(feedback_cell,
          MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       feedback_cell,
       FieldMemOperand(feedback_cell, JSFunction::kFeedbackCellOffset));
   __ Ldr(budget,
@@ -1793,8 +1793,8 @@ void LoadFixedArrayElement::GenerateCode(MaglevAssembler* masm,
   }
   Register result_reg = ToRegister(result());
   __ Add(result_reg, elements, Operand(index, LSL, kTaggedSizeLog2));
-  __ DecompressAnyTagged(result_reg,
+  __ DecompressTagged(result_reg,
                       FieldMemOperand(result_reg, FixedArray::kHeaderSize));
 }

 void LoadFixedDoubleArrayElement::SetValueLocationConstraints() {
@@ -1829,7 +1829,7 @@ void StoreDoubleField::GenerateCode(MaglevAssembler* masm,
   Register tmp = temps.Acquire();

   __ AssertNotSmi(object);
-  __ DecompressAnyTagged(tmp, FieldMemOperand(object, offset()));
+  __ DecompressTagged(tmp, FieldMemOperand(object, offset()));
   __ AssertNotSmi(tmp);
   __ Move(FieldMemOperand(tmp, HeapNumber::kValueOffset), value);
 }
@@ -32,9 +32,8 @@ void MaglevAssembler::LoadSingleCharacterString(Register result,
   DCHECK_LT(char_code, String::kMaxOneByteCharCode);
   Register table = result;
   LoadRoot(table, RootIndex::kSingleCharacterStringTable);
-  DecompressAnyTagged(
-      result, FieldMemOperand(
-                  table, FixedArray::kHeaderSize + char_code * kTaggedSize));
+  DecompressTagged(result, FieldMemOperand(table, FixedArray::kHeaderSize +
+                                                      char_code * kTaggedSize));
 }

 void MaglevAssembler::LoadDataField(const PolymorphicAccessInfo& access_info,
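(Because char_code is a compile-time constant here, the element offset into the single-character string table folds to a constant and the load needs no index register. A toy illustration of the folding follows; the size constants are the usual pointer-compression values but should be treated as assumptions.)

    // With pointer compression a tagged slot is 4 bytes and a FixedArray
    // header is a compressed map plus a length field.
    constexpr int kTaggedSize = 4;
    constexpr int kFixedArrayHeaderSize = 2 * kTaggedSize;

    constexpr int ElementOffset(int char_code) {
      return kFixedArrayHeaderSize + char_code * kTaggedSize;
    }

    static_assert(ElementOffset(0) == 8);
    static_assert(ElementOffset('A') == 8 + 65 * 4);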
@@ -54,13 +53,12 @@ void MaglevAssembler::LoadDataField(const PolymorphicAccessInfo& access_info,
     }
     // The field is in the property array, first load it from there.
     AssertNotSmi(load_source_object);
-    DecompressAnyTagged(load_source,
+    DecompressTagged(load_source,
                      FieldMemOperand(load_source_object,
                                      JSReceiver::kPropertiesOrHashOffset));
   }
   AssertNotSmi(load_source);
-  DecompressAnyTagged(result,
-                      FieldMemOperand(load_source, field_index.offset()));
+  DecompressTagged(result, FieldMemOperand(load_source, field_index.offset()));
 }

 }  // namespace maglev
@@ -953,7 +953,7 @@ void LoadDoubleField::GenerateCode(MaglevAssembler* masm,
   Register tmp = temps.Acquire();
   Register object = ToRegister(object_input());
   __ AssertNotSmi(object);
-  __ DecompressAnyTagged(tmp, FieldMemOperand(object, offset()));
+  __ DecompressTagged(tmp, FieldMemOperand(object, offset()));
   __ AssertNotSmi(tmp);
   __ LoadHeapNumberValue(ToDoubleRegister(result()), tmp);
 }
@@ -966,8 +966,7 @@ void LoadTaggedField::GenerateCode(MaglevAssembler* masm,
                                    const ProcessingState& state) {
   Register object = ToRegister(object_input());
   __ AssertNotSmi(object);
-  __ DecompressAnyTagged(ToRegister(result()),
-                         FieldMemOperand(object, offset()));
+  __ DecompressTagged(ToRegister(result()), FieldMemOperand(object, offset()));
 }

 namespace {
@@ -1065,8 +1064,8 @@ void LoadPolymorphicTaggedField::GenerateCode(MaglevAssembler* masm,
       Register cell = map;  // Reuse scratch.
       __ Move(cell, access_info.cell());
       __ AssertNotSmi(cell);
-      __ DecompressAnyTagged(result,
+      __ DecompressTagged(result,
                           FieldMemOperand(cell, Cell::kValueOffset));
       break;
     }
     case PolymorphicAccessInfo::kDataLoad: {
@@ -1878,7 +1877,7 @@ void GeneratorRestoreRegister::GenerateCode(MaglevAssembler* masm,
   Register value = (array == result_reg ? temp : result_reg);

   // Loads the current value in the generator register file.
-  __ DecompressAnyTagged(
+  __ DecompressTagged(
       value, FieldMemOperand(array, FixedArray::OffsetOfElementAt(index())));

   // And trashs it with StaleRegisterConstant.
@@ -2429,9 +2428,9 @@ void CallKnownJSFunction::GenerateCode(MaglevAssembler* masm,
     __ CallBuiltin(shared_function_info().builtin_id());
   } else {
     __ AssertCallableFunction(kJavaScriptCallTargetRegister);
-    __ LoadTaggedPointerField(kJavaScriptCallCodeStartRegister,
+    __ LoadTaggedField(kJavaScriptCallCodeStartRegister,
                        FieldMemOperand(kJavaScriptCallTargetRegister,
                                        JSFunction::kCodeOffset));
     __ CallCodeObject(kJavaScriptCallCodeStartRegister);
   }
   masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
@@ -372,10 +372,10 @@ inline void MaglevAssembler::DeoptIfBufferDetached(Register array,
           ->DependOnArrayBufferDetachingProtector()) {
     // A detached buffer leads to megamorphic feedback, so we won't have a deopt
     // loop if we deopt here.
-    LoadTaggedPointerField(
-        scratch, FieldOperand(array, JSArrayBufferView::kBufferOffset));
-    LoadTaggedPointerField(
-        scratch, FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset));
+    LoadTaggedField(scratch,
+                    FieldOperand(array, JSArrayBufferView::kBufferOffset));
+    LoadTaggedField(scratch,
+                    FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset));
     testl(scratch, Immediate(JSArrayBuffer::WasDetachedBit::kMask));
     EmitEagerDeoptIf(not_zero, DeoptimizeReason::kArrayBufferWasDetached, node);
   }
@@ -111,8 +111,8 @@ void MaglevAssembler::LoadSingleCharacterString(Register result,
   DCHECK_NE(char_code, scratch);
   Register table = scratch;
   LoadRoot(table, RootIndex::kSingleCharacterStringTable);
-  DecompressAnyTagged(result, FieldOperand(table, char_code, times_tagged_size,
+  DecompressTagged(result, FieldOperand(table, char_code, times_tagged_size,
                                         FixedArray::kHeaderSize));
 }

 void MaglevAssembler::StringFromCharCode(RegisterSnapshot register_snapshot,
@@ -224,8 +224,7 @@ void MaglevAssembler::StringCharCodeAt(RegisterSnapshot& register_snapshot,

   // Is a thin string.
   {
-    DecompressAnyTagged(string,
-                        FieldOperand(string, ThinString::kActualOffset));
+    DecompressTagged(string, FieldOperand(string, ThinString::kActualOffset));
     jmp(&loop, Label::kNear);
   }

@@ -234,8 +233,7 @@ void MaglevAssembler::StringCharCodeAt(RegisterSnapshot& register_snapshot,
     Register offset = scratch;
     movl(offset, FieldOperand(string, SlicedString::kOffsetOffset));
     SmiUntag(offset);
-    DecompressAnyTagged(string,
-                        FieldOperand(string, SlicedString::kParentOffset));
+    DecompressTagged(string, FieldOperand(string, SlicedString::kParentOffset));
     addl(index, offset);
     jmp(&loop, Label::kNear);
   }
@@ -245,7 +243,7 @@ void MaglevAssembler::StringCharCodeAt(RegisterSnapshot& register_snapshot,
     CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
                 RootIndex::kempty_string);
     j(not_equal, deferred_runtime_call);
-    DecompressAnyTagged(string, FieldOperand(string, ConsString::kFirstOffset));
+    DecompressTagged(string, FieldOperand(string, ConsString::kFirstOffset));
     jmp(&loop, Label::kNear);  // Try again with first string.
   }

@@ -57,7 +57,7 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm,
                                  const ProcessingState& state) {
   Register generator = ToRegister(generator_input());
   Register array = WriteBarrierDescriptor::ObjectRegister();
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       array, FieldOperand(generator,
                           JSGeneratorObject::kParametersAndRegistersOffset));

@@ -498,8 +498,8 @@ void CheckJSObjectElementsBounds::GenerateCode(MaglevAssembler* masm,
     __ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, kScratchRegister);
     __ Assert(greater_equal, AbortReason::kUnexpectedValue);
   }
-  __ LoadAnyTaggedField(kScratchRegister,
+  __ LoadTaggedField(kScratchRegister,
                      FieldOperand(object, JSObject::kElementsOffset));
   if (v8_flags.debug_code) {
     __ AssertNotSmi(kScratchRegister);
   }
@@ -548,8 +548,8 @@ void CheckedInternalizedString::GenerateCode(MaglevAssembler* masm,
   // Deopt if this isn't a thin string.
   __ testb(map_tmp, Immediate(kThinStringTagBit));
   __ EmitEagerDeoptIf(zero, DeoptimizeReason::kWrongMap, node);
-  __ LoadTaggedPointerField(
-      object, FieldOperand(object, ThinString::kActualOffset));
+  __ LoadTaggedField(object,
+                     FieldOperand(object, ThinString::kActualOffset));
   if (v8_flags.debug_code) {
     __ RecordComment("DCHECK IsInternalizedString");
     __ LoadMap(map_tmp, object);
@@ -721,9 +721,9 @@ void LoadFixedArrayElement::GenerateCode(MaglevAssembler* masm,
     __ cmpq(index, Immediate(0));
     __ Assert(above_equal, AbortReason::kUnexpectedNegativeValue);
   }
-  __ DecompressAnyTagged(result_reg,
+  __ DecompressTagged(result_reg,
                       FieldOperand(elements, index, times_tagged_size,
                                    FixedArray::kHeaderSize));
 }

 void LoadFixedDoubleArrayElement::SetValueLocationConstraints() {
@@ -1092,7 +1092,7 @@ void StoreDoubleField::GenerateCode(MaglevAssembler* masm,
   DoubleRegister value = ToDoubleRegister(value_input());

   __ AssertNotSmi(object);
-  __ DecompressAnyTagged(tmp, FieldOperand(object, offset()));
+  __ DecompressTagged(tmp, FieldOperand(object, offset()));
   __ AssertNotSmi(tmp);
   __ Movsd(FieldOperand(tmp, HeapNumber::kValueOffset), value);
 }
@@ -2162,8 +2162,8 @@ void IncreaseInterruptBudget::GenerateCode(MaglevAssembler* masm,
   MaglevAssembler::ScratchRegisterScope temps(masm);
   Register scratch = temps.Acquire();
   __ movq(scratch, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
-  __ LoadTaggedPointerField(
-      scratch, FieldOperand(scratch, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(scratch,
+                     FieldOperand(scratch, JSFunction::kFeedbackCellOffset));
   __ addl(FieldOperand(scratch, FeedbackCell::kInterruptBudgetOffset),
           Immediate(amount()));
 }
@@ -2253,8 +2253,8 @@ void HandleInterruptsAndTiering(MaglevAssembler* masm, ZoneLabelRef done,
   __ incl(FieldOperand(scratch0, FeedbackVector::kProfilerTicksOffset));
   // JSFunction::SetInterruptBudget.
   __ movq(scratch0, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
-  __ LoadTaggedPointerField(
-      scratch0, FieldOperand(scratch0, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(scratch0,
+                     FieldOperand(scratch0, JSFunction::kFeedbackCellOffset));
   __ movl(FieldOperand(scratch0, FeedbackCell::kInterruptBudgetOffset),
           Immediate(v8_flags.interrupt_budget));
   __ jmp(*done);
@@ -2272,8 +2272,8 @@ void ReduceInterruptBudget::GenerateCode(MaglevAssembler* masm,
   MaglevAssembler::ScratchRegisterScope temps(masm);
   Register scratch = temps.Acquire();
   __ movq(scratch, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
-  __ LoadTaggedPointerField(
-      scratch, FieldOperand(scratch, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(scratch,
+                     FieldOperand(scratch, JSFunction::kFeedbackCellOffset));
   __ subl(FieldOperand(scratch, FeedbackCell::kInterruptBudgetOffset),
           Immediate(amount()));
   ZoneLabelRef done(masm);
@@ -35,12 +35,12 @@ bool CompressedObjectSlot::contains_map_value(Address raw_value) const {

 Object CompressedObjectSlot::operator*() const {
   Tagged_t value = *location();
-  return Object(TCompressionScheme::DecompressTaggedAny(address(), value));
+  return Object(TCompressionScheme::DecompressTagged(address(), value));
 }

 Object CompressedObjectSlot::load(PtrComprCageBase cage_base) const {
   Tagged_t value = *location();
-  return Object(TCompressionScheme::DecompressTaggedAny(cage_base, value));
+  return Object(TCompressionScheme::DecompressTagged(cage_base, value));
 }

 void CompressedObjectSlot::store(Object value) const {
@@ -63,17 +63,17 @@ Map CompressedObjectSlot::load_map() const {

 Object CompressedObjectSlot::Acquire_Load() const {
   AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location());
-  return Object(TCompressionScheme::DecompressTaggedAny(address(), value));
+  return Object(TCompressionScheme::DecompressTagged(address(), value));
 }

 Object CompressedObjectSlot::Relaxed_Load() const {
   AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
-  return Object(TCompressionScheme::DecompressTaggedAny(address(), value));
+  return Object(TCompressionScheme::DecompressTagged(address(), value));
 }

 Object CompressedObjectSlot::Relaxed_Load(PtrComprCageBase cage_base) const {
   AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
-  return Object(TCompressionScheme::DecompressTaggedAny(cage_base, value));
+  return Object(TCompressionScheme::DecompressTagged(cage_base, value));
 }

 void CompressedObjectSlot::Relaxed_Store(Object value) const {
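(Every slot load above now has the same two-step shape: read the 32-bit cell, atomically where required, then decompress. A simplified, self-contained sketch of the relaxed-load path follows; std::atomic stands in for V8's AsAtomicTagged, and a raw address is returned instead of an Object.)

    #include <atomic>
    #include <cstdint>

    using Address = uintptr_t;
    using Tagged_t = uint32_t;

    // Relaxed atomic read of the compressed cell, followed by the single
    // unified decompression: cage base plus zero-extended payload.
    Address RelaxedLoadAndDecompress(const std::atomic<Tagged_t>* slot,
                                     Address cage_base) {
      Tagged_t value = slot->load(std::memory_order_relaxed);
      return cage_base + static_cast<Address>(value);
    }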
@@ -92,7 +92,7 @@ Object CompressedObjectSlot::Release_CompareAndSwap(Object old,
   Tagged_t target_ptr = TCompressionScheme::CompressTagged(target.ptr());
   Tagged_t result =
       AsAtomicTagged::Release_CompareAndSwap(location(), old_ptr, target_ptr);
-  return Object(TCompressionScheme::DecompressTaggedAny(address(), result));
+  return Object(TCompressionScheme::DecompressTagged(address(), result));
 }

 //
@@ -101,12 +101,12 @@ Object CompressedObjectSlot::Release_CompareAndSwap(Object old,

 MaybeObject CompressedMaybeObjectSlot::operator*() const {
   Tagged_t value = *location();
-  return MaybeObject(TCompressionScheme::DecompressTaggedAny(address(), value));
+  return MaybeObject(TCompressionScheme::DecompressTagged(address(), value));
 }

 MaybeObject CompressedMaybeObjectSlot::load(PtrComprCageBase cage_base) const {
   Tagged_t value = *location();
-  return MaybeObject(TCompressionScheme::DecompressTaggedAny(cage_base, value));
+  return MaybeObject(TCompressionScheme::DecompressTagged(cage_base, value));
 }

 void CompressedMaybeObjectSlot::store(MaybeObject value) const {
@@ -115,13 +115,13 @@ void CompressedMaybeObjectSlot::store(MaybeObject value) const {

 MaybeObject CompressedMaybeObjectSlot::Relaxed_Load() const {
   AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
-  return MaybeObject(TCompressionScheme::DecompressTaggedAny(address(), value));
+  return MaybeObject(TCompressionScheme::DecompressTagged(address(), value));
 }

 MaybeObject CompressedMaybeObjectSlot::Relaxed_Load(
     PtrComprCageBase cage_base) const {
   AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
-  return MaybeObject(TCompressionScheme::DecompressTaggedAny(cage_base, value));
+  return MaybeObject(TCompressionScheme::DecompressTagged(cage_base, value));
 }

 void CompressedMaybeObjectSlot::Relaxed_Store(MaybeObject value) const {
@@ -143,14 +143,14 @@ void CompressedMaybeObjectSlot::Release_CompareAndSwap(
 HeapObjectReference CompressedHeapObjectSlot::operator*() const {
   Tagged_t value = *location();
   return HeapObjectReference(
-      TCompressionScheme::DecompressTaggedPointer(address(), value));
+      TCompressionScheme::DecompressTagged(address(), value));
 }

 HeapObjectReference CompressedHeapObjectSlot::load(
     PtrComprCageBase cage_base) const {
   Tagged_t value = *location();
   return HeapObjectReference(
-      TCompressionScheme::DecompressTaggedPointer(cage_base, value));
+      TCompressionScheme::DecompressTagged(cage_base, value));
 }

 void CompressedHeapObjectSlot::store(HeapObjectReference value) const {
@ -161,7 +161,7 @@ HeapObject CompressedHeapObjectSlot::ToHeapObject() const {
|
|||||||
Tagged_t value = *location();
|
Tagged_t value = *location();
|
||||||
DCHECK(HAS_STRONG_HEAP_OBJECT_TAG(value));
|
DCHECK(HAS_STRONG_HEAP_OBJECT_TAG(value));
|
||||||
return HeapObject::cast(
|
return HeapObject::cast(
|
||||||
Object(TCompressionScheme::DecompressTaggedPointer(address(), value)));
|
Object(TCompressionScheme::DecompressTagged(address(), value)));
|
||||||
}
|
}
|
||||||
|
|
||||||
void CompressedHeapObjectSlot::StoreHeapObject(HeapObject value) const {
|
void CompressedHeapObjectSlot::StoreHeapObject(HeapObject value) const {
|
||||||
@ -176,7 +176,7 @@ template <typename CompressionScheme>
|
|||||||
Object OffHeapCompressedObjectSlot<CompressionScheme>::load(
|
Object OffHeapCompressedObjectSlot<CompressionScheme>::load(
|
||||||
PtrComprCageBase cage_base) const {
|
PtrComprCageBase cage_base) const {
|
||||||
Tagged_t value = *TSlotBase::location();
|
Tagged_t value = *TSlotBase::location();
|
||||||
return Object(CompressionScheme::DecompressTaggedAny(cage_base, value));
|
return Object(CompressionScheme::DecompressTagged(cage_base, value));
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename CompressionScheme>
|
template <typename CompressionScheme>
|
||||||
@ -188,14 +188,14 @@ template <typename CompressionScheme>
|
|||||||
Object OffHeapCompressedObjectSlot<CompressionScheme>::Relaxed_Load(
|
Object OffHeapCompressedObjectSlot<CompressionScheme>::Relaxed_Load(
|
||||||
PtrComprCageBase cage_base) const {
|
PtrComprCageBase cage_base) const {
|
||||||
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(TSlotBase::location());
|
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(TSlotBase::location());
|
||||||
return Object(CompressionScheme::DecompressTaggedAny(cage_base, value));
|
return Object(CompressionScheme::DecompressTagged(cage_base, value));
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename CompressionScheme>
|
template <typename CompressionScheme>
|
||||||
Object OffHeapCompressedObjectSlot<CompressionScheme>::Acquire_Load(
|
Object OffHeapCompressedObjectSlot<CompressionScheme>::Acquire_Load(
|
||||||
PtrComprCageBase cage_base) const {
|
PtrComprCageBase cage_base) const {
|
||||||
AtomicTagged_t value = AsAtomicTagged::Acquire_Load(TSlotBase::location());
|
AtomicTagged_t value = AsAtomicTagged::Acquire_Load(TSlotBase::location());
|
||||||
return Object(CompressionScheme::DecompressTaggedAny(cage_base, value));
|
return Object(CompressionScheme::DecompressTagged(cage_base, value));
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename CompressionScheme>
|
template <typename CompressionScheme>
|
||||||
|
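Note: the Relaxed_Load/Acquire_Load overloads above differ only in the memory order of the 32-bit atomic read; the decompression step afterwards is identical. A hedged sketch of the pattern using std::atomic (V8's AsAtomicTagged wraps raw memory instead, but the ordering semantics are the same):

#include <atomic>
#include <cstdint>

using Tagged_t = uint32_t;

struct CompressedSlotSketch {
  std::atomic<Tagged_t> value{0};

  // No ordering guarantees; fine when any racing value is acceptable.
  Tagged_t RelaxedLoad() const { return value.load(std::memory_order_relaxed); }

  // Pairs with a release store: everything written before that store is
  // visible once this load observes it.
  Tagged_t AcquireLoad() const { return value.load(std::memory_order_acquire); }
};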
@@ -479,8 +479,8 @@ void SortIndices(Isolate* isolate, Handle<FixedArray> indices,
   AtomicSlot end(start + sort_size);
   std::sort(start, end, [isolate](Tagged_t elementA, Tagged_t elementB) {
 #ifdef V8_COMPRESS_POINTERS
-    Object a(V8HeapCompressionScheme::DecompressTaggedAny(isolate, elementA));
-    Object b(V8HeapCompressionScheme::DecompressTaggedAny(isolate, elementB));
+    Object a(V8HeapCompressionScheme::DecompressTagged(isolate, elementA));
+    Object b(V8HeapCompressionScheme::DecompressTagged(isolate, elementB));
 #else
     Object a(elementA);
     Object b(elementB);
@@ -84,7 +84,7 @@ HeapObjectReference HeapObjectReference::ClearedValue(
 #ifdef V8_COMPRESS_POINTERS
   // This is necessary to make pointer decompression computation also
   // suitable for cleared weak references.
-  Address raw_value = V8HeapCompressionScheme::DecompressTaggedPointer(
+  Address raw_value = V8HeapCompressionScheme::DecompressTagged(
       cage_base, kClearedWeakHeapObjectLower32);
 #else
   Address raw_value = kClearedWeakHeapObjectLower32;
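Note: a cleared weak reference stores a small constant in the slot's low 32 bits; running that constant through the ordinary decompression arithmetic yields a full-width "cleared" value, so full-pointer comparisons need no special compressed-mode case. A hedged sketch (the constant's exact value here is an illustrative stand-in):

#include <cstdint>

using Address = uintptr_t;

constexpr uint32_t kClearedWeakLower32 = 3;  // illustrative stand-in

inline Address ClearedValueSketch(Address cage_base) {
  // Same arithmetic as normal decompression, applied to the constant.
  return (cage_base & ~Address{0xFFFFFFFFu}) + kClearedWeakLower32;
}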
@@ -35,10 +35,9 @@ Address TaggedField<T, kFieldOffset, CompressionScheme>::tagged_to_full(
   if (kIsSmi) {
     return CompressionScheme::DecompressTaggedSigned(tagged_value);
   } else if (kIsHeapObject) {
-    return CompressionScheme::DecompressTaggedPointer(on_heap_addr,
-                                                      tagged_value);
+    return CompressionScheme::DecompressTagged(on_heap_addr, tagged_value);
   } else {
-    return CompressionScheme::DecompressTaggedAny(on_heap_addr, tagged_value);
+    return CompressionScheme::DecompressTagged(on_heap_addr, tagged_value);
   }
 #else
   return tagged_value;
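Note: after this change the only split left in tagged_to_full is Smi vs. everything else: a statically-known Smi needs no cage base, while heap objects and any-tagged values share DecompressTagged. A minimal sketch of the dispatch (illustrative, reusing the decompression sketch above):

#include <cstdint>

using Address = uintptr_t;
using Tagged_t = uint32_t;

// Smis are self-contained: zero extension suffices once the upper 32 bits
// of a decompressed Smi are allowed to be undefined.
inline Address DecompressTaggedSigned(Tagged_t value) {
  return static_cast<Address>(value);
}

inline Address DecompressTagged(Address on_heap_addr, Tagged_t value) {
  return (on_heap_addr & ~Address{0xFFFFFFFFu}) + value;
}

template <bool kIsSmi>
Address TaggedToFullSketch(Address on_heap_addr, Tagged_t value) {
  return kIsSmi ? DecompressTaggedSigned(value)
                : DecompressTagged(on_heap_addr, value);
}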
@@ -112,9 +112,8 @@ bool TaggedImpl<kRefType, StorageType>::GetHeapObjectIfStrong(
   if (kIsFull) return GetHeapObjectIfStrong(result);
   // Implementation for compressed pointers.
   if (IsStrong()) {
-    *result =
-        HeapObject::cast(Object(CompressionScheme::DecompressTaggedPointer(
-            isolate, static_cast<Tagged_t>(ptr_))));
+    *result = HeapObject::cast(Object(CompressionScheme::DecompressTagged(
+        isolate, static_cast<Tagged_t>(ptr_))));
     return true;
   }
   return false;
@@ -138,7 +137,7 @@ HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObjectAssumeStrong(
   if (kIsFull) return GetHeapObjectAssumeStrong();
   // Implementation for compressed pointers.
   DCHECK(IsStrong());
-  return HeapObject::cast(Object(CompressionScheme::DecompressTaggedPointer(
+  return HeapObject::cast(Object(CompressionScheme::DecompressTagged(
       isolate, static_cast<Tagged_t>(ptr_))));
 }
 
@@ -224,11 +223,11 @@ HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObject(
   DCHECK(!IsSmi());
   if (kCanBeWeak) {
     DCHECK(!IsCleared());
-    return HeapObject::cast(Object(CompressionScheme::DecompressTaggedPointer(
+    return HeapObject::cast(Object(CompressionScheme::DecompressTagged(
         isolate, static_cast<Tagged_t>(ptr_) & ~kWeakHeapObjectMask)));
   } else {
     DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(ptr_));
-    return HeapObject::cast(Object(CompressionScheme::DecompressTaggedPointer(
+    return HeapObject::cast(Object(CompressionScheme::DecompressTagged(
         isolate, static_cast<Tagged_t>(ptr_))));
   }
 }
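Note: weak references carry a tag bit in the compressed value, so GetHeapObject masks it off before decompressing while strong references decompress as-is; a single decompression routine then serves both. A hedged sketch (the mask value is illustrative):

#include <cstdint>

using Address = uintptr_t;
using Tagged_t = uint32_t;

constexpr Tagged_t kWeakMaskSketch = 0x2;  // stand-in for kWeakHeapObjectMask

inline Address DecompressTagged(Address base_addr, Tagged_t value) {
  return (base_addr & ~Address{0xFFFFFFFFu}) + value;
}

inline Address GetHeapObjectSketch(Address cage_addr, Tagged_t ptr,
                                   bool can_be_weak) {
  // Clearing the weak bit first is what lets strong and weak references
  // share one decompression path.
  Tagged_t strong = can_be_weak ? (ptr & ~kWeakMaskSketch) : ptr;
  return DecompressTagged(cage_addr, strong);
}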
@@ -30,7 +30,7 @@ inline StrongTaggedValue::StrongTaggedValue(Object o)
 
 Object StrongTaggedValue::ToObject(Isolate* isolate, StrongTaggedValue object) {
 #ifdef V8_COMPRESS_POINTERS
-  return Object(CompressionScheme::DecompressTaggedAny(isolate, object.ptr()));
+  return Object(CompressionScheme::DecompressTagged(isolate, object.ptr()));
 #else
   return Object(object.ptr());
 #endif
@@ -49,7 +49,7 @@ inline TaggedValue::TaggedValue(MaybeObject o)
 MaybeObject TaggedValue::ToMaybeObject(Isolate* isolate, TaggedValue object) {
 #ifdef V8_COMPRESS_POINTERS
   return MaybeObject(
-      CompressionScheme::DecompressTaggedAny(isolate, object.ptr()));
+      CompressionScheme::DecompressTagged(isolate, object.ptr()));
 #else
   return MaybeObject(object.ptr());
 #endif
@@ -125,7 +125,7 @@ void ReadOnlyRoots::VerifyNameForProtectorsPages() const {
 
 Address ReadOnlyRoots::at(RootIndex root_index) const {
 #if V8_STATIC_ROOTS_BOOL
-  return V8HeapCompressionScheme::DecompressTaggedPointer(
+  return V8HeapCompressionScheme::DecompressTagged(
       V8HeapCompressionScheme::base(),
       StaticReadOnlyRootsPointerTable[static_cast<int>(root_index)]);
 #else
@@ -75,8 +75,7 @@ void ReadOnlyRoots::InitFromStaticRootsTable(Address cage_base) {
 #if V8_STATIC_ROOTS_BOOL
   RootIndex pos = RootIndex::kFirstReadOnlyRoot;
   for (auto element : StaticReadOnlyRootsPointerTable) {
-    auto ptr =
-        V8HeapCompressionScheme::DecompressTaggedPointer(cage_base, element);
+    auto ptr = V8HeapCompressionScheme::DecompressTagged(cage_base, element);
     DCHECK(!is_initialized(pos));
     read_only_roots_[static_cast<size_t>(pos)] = ptr;
     ++pos;
@@ -438,7 +438,7 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
                                                      Register instance,
                                                      int offset) {
   DCHECK_LE(0, offset);
-  LoadTaggedPointerField(dst, MemOperand{instance, offset});
+  LoadTaggedField(dst, MemOperand{instance, offset});
 }
 
 void LiftoffAssembler::LoadExternalPointer(Register dst, Register instance,
@@ -461,7 +461,7 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
   unsigned shift_amount = !needs_shift ? 0 : COMPRESS_POINTERS_BOOL ? 2 : 3;
   MemOperand src_op = liftoff::GetMemOp(this, &temps, src_addr, offset_reg,
                                         offset_imm, false, shift_amount);
-  LoadTaggedPointerField(dst, src_op);
+  LoadTaggedField(dst, src_op);
 }
 
 void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
@@ -502,7 +502,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
   bind(&write_barrier);
   JumpIfSmi(src.gp(), &exit);
   if (COMPRESS_POINTERS_BOOL) {
-    DecompressTaggedPointer(src.gp(), src.gp());
+    DecompressTagged(src.gp(), src.gp());
   }
   CheckPageFlag(src.gp(),
                 MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
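Note: the write-barrier path shows why a decompression survives here at all: the Smi check can run on the compressed value, but CheckPageFlag must locate the stored object's page header, which needs the full pointer. A hedged outline in plain C++ rather than the emitted macro-assembler calls (all names illustrative):

#include <cstdint>

using Address = uintptr_t;

inline bool IsSmiSketch(Address v) { return (v & 1) == 0; }  // tag-bit sketch

inline bool PageNeedsBarrierSketch(Address full_ptr) {
  (void)full_ptr;  // stand-in for the MemoryChunk page-flag test
  return false;
}

inline Address DecompressSketch(Address cage_addr, uint32_t v) {
  return (cage_addr & ~Address{0xFFFFFFFFu}) + v;
}

inline void StoreTaggedSketch(Address cage_addr, Address* slot, Address src,
                              bool compressed) {
  *slot = src;                   // the store itself is unconditional
  if (IsSmiSketch(src)) return;  // Smis never need a barrier
  Address full =
      compressed ? DecompressSketch(cage_addr, static_cast<uint32_t>(src))
                 : src;
  if (PageNeedsBarrierSketch(full)) {
    // an out-of-line write-barrier stub would be called here
  }
}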
@@ -266,7 +266,7 @@ void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
 void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
                                                      Register instance,
                                                      int offset) {
-  LoadTaggedPointerField(dst, MemOperand(instance, offset), r0);
+  LoadTaggedField(dst, MemOperand(instance, offset), r0);
 }
 
 void LiftoffAssembler::SpillInstance(Register instance) {
@@ -283,7 +283,7 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
     ShiftLeftU64(ip, offset_reg, Operand(shift_amount));
     offset_reg = ip;
   }
-  LoadTaggedPointerField(dst, MemOperand(src_addr, offset_reg, offset_imm), r0);
+  LoadTaggedField(dst, MemOperand(src_addr, offset_reg, offset_imm), r0);
 }
 
 void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
@@ -310,7 +310,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
   bind(&write_barrier);
   JumpIfSmi(src.gp(), &exit);
   if (COMPRESS_POINTERS_BOOL) {
-    DecompressTaggedPointer(src.gp(), src.gp());
+    DecompressTagged(src.gp(), src.gp());
   }
   CheckPageFlag(src.gp(), ip,
                 MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
@@ -196,7 +196,7 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
                                                      Register instance,
                                                      int offset) {
   DCHECK_LE(0, offset);
-  LoadTaggedPointerField(dst, MemOperand{instance, offset});
+  LoadTaggedField(dst, MemOperand{instance, offset});
 }
 
 void LiftoffAssembler::SpillInstance(Register instance) {
@@ -246,7 +246,7 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
                                                      Register instance,
                                                      int offset) {
   DCHECK_LE(0, offset);
-  LoadTaggedPointerField(dst, MemOperand(instance, offset));
+  LoadTaggedField(dst, MemOperand(instance, offset));
 }
 
 void LiftoffAssembler::SpillInstance(Register instance) {
@@ -264,7 +264,7 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
     ShiftLeftU64(ip, offset_reg, Operand(shift_amount));
     offset_reg = ip;
   }
-  LoadTaggedPointerField(
+  LoadTaggedField(
       dst,
       MemOperand(src_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
 }
@@ -295,7 +295,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
   bind(&write_barrier);
   JumpIfSmi(src.gp(), &exit);
   if (COMPRESS_POINTERS_BOOL) {
-    DecompressTaggedPointer(src.gp(), src.gp());
+    DecompressTagged(src.gp(), src.gp());
   }
   CheckPageFlag(src.gp(), r1,
                 MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
@@ -352,7 +352,7 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
                                                      Register instance,
                                                      int offset) {
   DCHECK_LE(0, offset);
-  LoadTaggedPointerField(dst, Operand(instance, offset));
+  LoadTaggedField(dst, Operand(instance, offset));
 }
 
 void LiftoffAssembler::LoadExternalPointer(Register dst, Register instance,
@@ -381,7 +381,7 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
   Operand src_op =
       liftoff::GetMemOp(this, src_addr, offset_reg,
                         static_cast<uint32_t>(offset_imm), scale_factor);
-  LoadTaggedPointerField(dst, src_op);
+  LoadTaggedField(dst, src_op);
 }
 
 void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
@@ -414,7 +414,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
   bind(&write_barrier);
   JumpIfSmi(src.gp(), &exit, Label::kNear);
   if (COMPRESS_POINTERS_BOOL) {
-    DecompressTaggedPointer(src.gp(), src.gp());
+    DecompressTagged(src.gp(), src.gp());
   }
   CheckPageFlag(src.gp(), scratch,
                 MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
@@ -28,7 +28,7 @@ uintptr_t EnsureDecompressed(uintptr_t address,
   if (!COMPRESS_POINTERS_BOOL || !IsPointerCompressed(address)) return address;
   // TODO(v8:11880): ExternalCodeCompressionScheme might be needed here for
   // decompressing Code pointers from external code space.
-  return i::V8HeapCompressionScheme::DecompressTaggedAny(
+  return i::V8HeapCompressionScheme::DecompressTagged(
       any_uncompressed_ptr, static_cast<i::Tagged_t>(address));
 }
 
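Note: the debug helper above treats a value as compressed when it fits in the low 32 bits, then decompresses it against the cage of any known uncompressed pointer. A hedged sketch of that heuristic (names illustrative):

#include <cstdint>

using Address = uintptr_t;

inline bool LooksCompressed(Address address) {
  return address == static_cast<uint32_t>(address);
}

inline Address EnsureDecompressedSketch(Address address,
                                        Address any_uncompressed_ptr) {
  if (!LooksCompressed(address)) return address;
  // Reuse the cage of a pointer known to be uncompressed.
  return (any_uncompressed_ptr & ~Address{0xFFFFFFFFu}) +
         static_cast<uint32_t>(address);
}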
@@ -75,7 +75,7 @@ if (hasattr(v8heapconst, 'HEAP_FIRST_PAGES')): # Only exists in ptr-compr build
   if (space_name in expected_spaces):
     out = out + '  if (heap_addresses->' + space_name + '_first_page == 0) {\n'
     out = out + '    heap_addresses->' + space_name + \
-        '_first_page = i::V8HeapCompressionScheme::DecompressTaggedPointer(' + \
+        '_first_page = i::V8HeapCompressionScheme::DecompressTagged(' + \
         'any_uncompressed_ptr, ' + str(offset) + ');\n'
     out = out + '  }\n'
 out = out + '}\n'