[*] Merged V11.2

(unstable?)

Last aurora commit: 465f7783
Reece Wilson 2023-02-06 23:19:22 +00:00
commit 87131415c9
291 changed files with 9534 additions and 8075 deletions

View File

@ -1243,8 +1243,8 @@ filegroup(
"src/codegen/tick-counter.h",
"src/codegen/tnode.cc",
"src/codegen/tnode.h",
"src/codegen/turbo-assembler.cc",
"src/codegen/turbo-assembler.h",
"src/codegen/macro-assembler-base.cc",
"src/codegen/macro-assembler-base.h",
"src/codegen/unoptimized-compilation-info.cc",
"src/codegen/unoptimized-compilation-info.h",
"src/common/assert-scope.cc",

View File

@ -2833,6 +2833,7 @@ v8_header_set("v8_internal_headers") {
"src/codegen/interface-descriptors.h",
"src/codegen/label.h",
"src/codegen/machine-type.h",
"src/codegen/macro-assembler-base.h",
"src/codegen/macro-assembler-inl.h",
"src/codegen/macro-assembler.h",
"src/codegen/maglev-safepoint-table.h",
@ -2853,7 +2854,6 @@ v8_header_set("v8_internal_headers") {
"src/codegen/source-position.h",
"src/codegen/tick-counter.h",
"src/codegen/tnode.h",
"src/codegen/turbo-assembler.h",
"src/codegen/unoptimized-compilation-info.h",
"src/common/assert-scope.h",
"src/common/checks.h",
@ -4581,6 +4581,7 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/handler-table.cc",
"src/codegen/interface-descriptors.cc",
"src/codegen/machine-type.cc",
"src/codegen/macro-assembler-base.cc",
"src/codegen/maglev-safepoint-table.cc",
"src/codegen/optimized-compilation-info.cc",
"src/codegen/pending-optimization-table.cc",
@ -4591,7 +4592,6 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/source-position.cc",
"src/codegen/tick-counter.cc",
"src/codegen/tnode.cc",
"src/codegen/turbo-assembler.cc",
"src/codegen/unoptimized-compilation-info.cc",
"src/common/assert-scope.cc",
"src/common/code-memory-access.cc",
@ -5163,7 +5163,7 @@ v8_source_set("v8_base_without_compiler") {
if (v8_enable_webassembly) {
# Trap handling is enabled on arm64 Mac and in simulators on x64 on Linux,
# Mac, and Windows.
if ((current_cpu == "arm64" && is_mac) ||
if ((current_cpu == "arm64" && is_apple) ||
(current_cpu == "x64" && (is_linux || is_chromeos || is_mac))) {
sources += [
"src/trap-handler/handler-inside-posix.cc",

DEPS
View File

@ -63,12 +63,12 @@ vars = {
'ninja_version': 'version:2@1.11.1.chromium.6',
# luci-go CIPD package version.
'luci_go': 'git_revision:c41d94e382727fc5276cd2771741990543fce337',
'luci_go': 'git_revision:46eca1e3a280c340bf58f967aaded13c87ca3859',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Fuchsia sdk
# and whatever else without interference from each other.
'fuchsia_version': 'version:11.20230131.1.1',
'fuchsia_version': 'version:11.20230202.3.1',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_build-tools_version
@ -106,11 +106,11 @@ vars = {
deps = {
'base/trace_event/common':
Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '68e6038b5350cba18c341cc7c572170af5c5b20c',
Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '05a225a3e0bbd6fb6a9cac02d482ab784194411d',
'build':
Var('chromium_url') + '/chromium/src/build.git' + '@' + 'e0df145ecb560e48381b6dccf3b9c8b31aa95bcd',
Var('chromium_url') + '/chromium/src/build.git' + '@' + 'd0fad164969ab7f41f163f9ee738ea692f43df53',
'buildtools':
Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '295c6e5037e358904aef73a21409896d58547ba6',
Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '5408fe0e010a7d36bb2684d5f38df67dcdfe31de',
'buildtools/clang_format/script':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + 'f97059df7f8b205064625cdb5f97b56668a125ef',
'buildtools/linux64': {
@ -134,7 +134,7 @@ deps = {
'condition': 'host_os == "mac"',
},
'buildtools/third_party/libc++/trunk':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '59bae40d835ae4eabaddbef781f5e3b778dd7907',
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '549781a48cef7a038cadbe8ae9034c2d63685d9a',
'buildtools/third_party/libc++abi/trunk':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'b74d7716111d7eda5c03cb8f5dfc940e1c2c0030',
'buildtools/third_party/libunwind/trunk':
@ -164,7 +164,7 @@ deps = {
'test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'test/test262/data':
Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'f00d4118dba5d266d1611ba2cd4e995d3e4b523a',
Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'e7364ea7dc36a466edb2db5ef0a8e66da8dabb7d',
'third_party/android_ndk': {
'url': Var('chromium_url') + '/android_ndk.git' + '@' + '8388a2be5421311dc75c5f937aae13d821a27f3d',
'condition': 'checkout_android',
@ -212,7 +212,7 @@ deps = {
'dep_type': 'cipd',
},
'third_party/catapult': {
'url': Var('chromium_url') + '/catapult.git' + '@' + '5a468ccd919e16a29bb3121e3c90f27bf8745942',
'url': Var('chromium_url') + '/catapult.git' + '@' + 'd0d703ea303c91f3afe39ebf8d2d4c9342accedc',
'condition': 'checkout_android',
},
'third_party/colorama/src': {
@ -220,7 +220,7 @@ deps = {
'condition': 'checkout_android',
},
'third_party/depot_tools':
Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '3d072ab6fb49fd3d2116a41cee66d47c3d409299',
Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'ef2d011ad3041801565aa8c6d1418cc82c0ddb2e',
'third_party/fuchsia-sdk/sdk': {
'packages': [
{
@ -237,9 +237,9 @@ deps = {
'third_party/googletest/src':
Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'af29db7ec28d6df1c7f0f745186884091e602e07',
'third_party/icu':
Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '2c51e5cc7e0a06cd4cd7cb2ddbac445af9b475ba',
Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '266a46937f05303da1ac4c68f2c94f9a1caa3f76',
'third_party/instrumented_libraries':
Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '09ba70cfb2c0d01c60684660e357ae200caf2968',
Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '63d81e44712888bf70d574d5a96fa591994b9acc',
'third_party/ittapi': {
# Force checkout ittapi libraries to pass v8 header includes check on
# bots that has check_v8_header_includes enabled.

View File

@ -104,6 +104,9 @@
'trap-handler': {
'filepath': 'src/trap-handler/',
},
'tests': {
'filepath': 'test/',
},
},
'WATCHLISTS': {
@ -124,6 +127,7 @@
],
'feature_shipping_status': [
'hablich@chromium.org',
'saelo+watch@chromium.org',
],
'heap_changes': [
'hpayer@chromium.org',
@ -176,5 +180,8 @@
'mark@chromium.org',
'mseaborn@chromium.org',
],
'tests': [
'almuthanna+watch@chromium.org',
],
},
}

View File

@ -17,6 +17,11 @@
namespace cppgc {
namespace internal {
enum class WriteBarrierSlotType {
kCompressed,
kUncompressed,
};
#if defined(CPPGC_POINTER_COMPRESSION)
#if defined(__clang__)
@ -64,6 +69,8 @@ class CageBaseGlobal final {
class V8_TRIVIAL_ABI CompressedPointer final {
public:
using IntegralType = uint32_t;
static constexpr auto kWriteBarrierSlotType =
WriteBarrierSlotType::kCompressed;
V8_INLINE CompressedPointer() : value_(0u) {}
V8_INLINE explicit CompressedPointer(const void* ptr)
@ -173,6 +180,8 @@ class V8_TRIVIAL_ABI CompressedPointer final {
class V8_TRIVIAL_ABI RawPointer final {
public:
using IntegralType = uintptr_t;
static constexpr auto kWriteBarrierSlotType =
WriteBarrierSlotType::kUncompressed;
V8_INLINE RawPointer() : ptr_(nullptr) {}
V8_INLINE explicit RawPointer(const void* ptr) : ptr_(ptr) {}

View File

@ -33,10 +33,11 @@ struct DijkstraWriteBarrierPolicy {
// barrier doesn't break the tri-color invariant.
}
template <WriteBarrierSlotType SlotType>
V8_INLINE static void AssigningBarrier(const void* slot, const void* value) {
#ifdef CPPGC_SLIM_WRITE_BARRIER
if (V8_UNLIKELY(WriteBarrier::IsEnabled()))
WriteBarrier::CombinedWriteBarrierSlow(slot);
WriteBarrier::CombinedWriteBarrierSlow<SlotType>(slot);
#else // !CPPGC_SLIM_WRITE_BARRIER
WriteBarrier::Params params;
const WriteBarrier::Type type =
@ -45,12 +46,14 @@ struct DijkstraWriteBarrierPolicy {
#endif // !CPPGC_SLIM_WRITE_BARRIER
}
template <typename MemberStorage>
V8_INLINE static void AssigningBarrier(const void* slot,
MemberStorage storage) {
template <WriteBarrierSlotType SlotType>
V8_INLINE static void AssigningBarrier(const void* slot, RawPointer storage) {
static_assert(
SlotType == WriteBarrierSlotType::kUncompressed,
"Assigning storages of Member and UncompressedMember is not supported");
#ifdef CPPGC_SLIM_WRITE_BARRIER
if (V8_UNLIKELY(WriteBarrier::IsEnabled()))
WriteBarrier::CombinedWriteBarrierSlow(slot);
WriteBarrier::CombinedWriteBarrierSlow<SlotType>(slot);
#else // !CPPGC_SLIM_WRITE_BARRIER
WriteBarrier::Params params;
const WriteBarrier::Type type =
@ -59,6 +62,25 @@ struct DijkstraWriteBarrierPolicy {
#endif // !CPPGC_SLIM_WRITE_BARRIER
}
#if defined(CPPGC_POINTER_COMPRESSION)
template <WriteBarrierSlotType SlotType>
V8_INLINE static void AssigningBarrier(const void* slot,
CompressedPointer storage) {
static_assert(
SlotType == WriteBarrierSlotType::kCompressed,
"Assigning storages of Member and UncompressedMember is not supported");
#ifdef CPPGC_SLIM_WRITE_BARRIER
if (V8_UNLIKELY(WriteBarrier::IsEnabled()))
WriteBarrier::CombinedWriteBarrierSlow<SlotType>(slot);
#else // !CPPGC_SLIM_WRITE_BARRIER
WriteBarrier::Params params;
const WriteBarrier::Type type =
WriteBarrier::GetWriteBarrierType(slot, storage, params);
WriteBarrier(type, params, slot, storage.Load());
#endif // !CPPGC_SLIM_WRITE_BARRIER
}
#endif // defined(CPPGC_POINTER_COMPRESSION)
private:
V8_INLINE static void WriteBarrier(WriteBarrier::Type type,
const WriteBarrier::Params& params,
@ -79,8 +101,9 @@ struct DijkstraWriteBarrierPolicy {
struct NoWriteBarrierPolicy {
V8_INLINE static void InitializingBarrier(const void*, const void*) {}
template <WriteBarrierSlotType>
V8_INLINE static void AssigningBarrier(const void*, const void*) {}
template <typename MemberStorage>
template <WriteBarrierSlotType, typename MemberStorage>
V8_INLINE static void AssigningBarrier(const void*, MemberStorage) {}
};
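
The pattern running through these cppgc hunks: each Member storage representation (member-storage.h above) now advertises a kWriteBarrierSlotType, and the barrier policies and CombinedWriteBarrierSlow (write-barrier.h below) are templated on it, so the slow path statically knows whether a slot holds a compressed or a full pointer. A minimal compilable sketch of that dispatch, with simplified names standing in for the real classes:

enum class WriteBarrierSlotType { kCompressed, kUncompressed };

struct RawPointer {
  static constexpr auto kWriteBarrierSlotType =
      WriteBarrierSlotType::kUncompressed;
};
struct CompressedPointer {
  static constexpr auto kWriteBarrierSlotType =
      WriteBarrierSlotType::kCompressed;
};

template <WriteBarrierSlotType SlotType>
void CombinedWriteBarrierSlow(const void* slot) {
  // Knowing the slot format at compile time, the slow path can reload the
  // value from |slot| without a runtime check of its representation.
}

template <typename StorageType>
void Assign(const void* slot) {
  // Call sites (see the member.h hunk below) pick the instantiation from
  // the storage type's constant.
  CombinedWriteBarrierSlow<StorageType::kWriteBarrierSlotType>(slot);
}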

View File

@ -84,6 +84,7 @@ class V8_EXPORT WriteBarrier final {
// A write barrier that combines `GenerationalBarrier()` and
// `DijkstraMarkingBarrier()`. We only pass a single parameter here to clobber
// as few registers as possible.
template <WriteBarrierSlotType>
static V8_NOINLINE void V8_PRESERVE_MOST
CombinedWriteBarrierSlow(const void* slot);
#endif // CPPGC_SLIM_WRITE_BARRIER

View File

@ -309,11 +309,13 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase<StorageType>,
WriteBarrierPolicy::InitializingBarrier(Base::GetRawSlot(), value);
}
V8_INLINE void AssigningWriteBarrier(T* value) const {
WriteBarrierPolicy::AssigningBarrier(Base::GetRawSlot(), value);
WriteBarrierPolicy::template AssigningBarrier<
StorageType::kWriteBarrierSlotType>(Base::GetRawSlot(), value);
}
V8_INLINE void AssigningWriteBarrier() const {
WriteBarrierPolicy::AssigningBarrier(Base::GetRawSlot(),
Base::GetRawStorage());
WriteBarrierPolicy::template AssigningBarrier<
StorageType::kWriteBarrierSlotType>(Base::GetRawSlot(),
Base::GetRawStorage());
}
V8_INLINE void ClearFromGC() const { Base::ClearFromGC(); }
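
One C++ detail in the call sites above: the "::template" spelling is required syntax, not style. Inside BasicMember, WriteBarrierPolicy is a dependent type, so the parser needs the template keyword to read AssigningBarrier<...> as a member template rather than a less-than comparison. A standalone reproduction of the rule:

enum class SlotType { kA };

struct Policy {
  template <SlotType>
  static void Barrier(const void*) {}
};

struct Storage {
  static constexpr SlotType kSlotType = SlotType::kA;
};

template <typename P, typename S>
struct Holder {
  void Touch(const void* slot) {
    // Without "template" this line fails to parse, because P depends on the
    // enclosing template's parameters.
    P::template Barrier<S::kSlotType>(slot);
  }
};

int main() { Holder<Policy, Storage>{}.Touch(nullptr); }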

View File

@ -365,8 +365,7 @@ Local<Value> Context::GetEmbedderData(int index) {
#ifdef V8_COMPRESS_POINTERS
// We read the full pointer value and then decompress it in order to avoid
// dealing with potential endianness issues.
value =
I::DecompressTaggedAnyField(embedder_data, static_cast<uint32_t>(value));
value = I::DecompressTaggedField(embedder_data, static_cast<uint32_t>(value));
#endif
internal::Isolate* isolate = internal::IsolateFromNeverReadOnlySpaceObject(
*reinterpret_cast<A*>(this));

View File

@ -880,7 +880,7 @@ class Internals {
return addr & -static_cast<intptr_t>(kPtrComprCageBaseAlignment);
}
V8_INLINE static internal::Address DecompressTaggedAnyField(
V8_INLINE static internal::Address DecompressTaggedField(
internal::Address heap_object_ptr, uint32_t value) {
internal::Address base =
GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
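
For context on the rename: the helper recovers the pointer-compression cage base by masking any on-heap address, then re-bases the 32-bit field value. A self-contained sketch consistent with the lines shown here; the 4 GiB alignment constant and the final addition (cut off by this hunk) are assumptions for illustration:

#include <cstdint>

using Address = uintptr_t;
constexpr Address kPtrComprCageBaseAlignment = Address{1} << 32;  // assumed

Address GetPtrComprCageBaseFromOnHeapAddress(Address addr) {
  // Every on-heap address shares the cage base, so clearing the low bits of
  // any of them recovers it.
  return addr & -static_cast<intptr_t>(kPtrComprCageBaseAlignment);
}

Address DecompressTaggedField(Address heap_object_ptr, uint32_t value) {
  Address base = GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
  return base + value;  // compressed fields store only the in-cage offset
}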

View File

@ -542,6 +542,7 @@ class V8_EXPORT Isolate {
kAsyncStackTaggingCreateTaskCall = 116,
kDurationFormat = 117,
kInvalidatedNumberStringPrototypeNoReplaceProtector = 118,
kRegExpUnicodeSetIncompatibilitiesWithUnicodeMode = 119,
// If you add new values here, you'll also need to update Chromium's:
// web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to

View File

@ -717,7 +717,7 @@ Local<Value> Object::GetInternalField(int index) {
#ifdef V8_COMPRESS_POINTERS
// We read the full pointer value and then decompress it in order to avoid
// dealing with potential endianness issues.
value = I::DecompressTaggedAnyField(obj, static_cast<uint32_t>(value));
value = I::DecompressTaggedField(obj, static_cast<uint32_t>(value));
#endif
internal::Isolate* isolate =
internal::IsolateFromNeverReadOnlySpaceObject(obj);

View File

@ -346,12 +346,15 @@ path. Add it with -I<path> to the command line
# define V8_HAS_ATTRIBUTE_NONNULL (__has_attribute(nonnull))
# define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline))
# define V8_HAS_ATTRIBUTE_UNUSED (__has_attribute(unused))
// Support for the "preserve_most" attribute is incomplete on 32-bit, and we see
// failures in component builds. Thus only use it in 64-bit non-component builds
// for now.
#if (defined(_M_X64) || defined(__x86_64__) || defined(__AARCH64EL__) || \
defined(_M_ARM64)) /* x64 or arm64 */ \
&& !defined(COMPONENT_BUILD)
// Support for the "preserve_most" attribute is limited:
// - 32-bit platforms do not implement it,
// - component builds fail because _dl_runtime_resolve clobbers registers,
// - we see crashes on arm64 on Windows (https://crbug.com/1409934), which can
// hopefully be fixed in the future.
#if (defined(_M_X64) || defined(__x86_64__) /* x64 (everywhere) */ \
|| ((defined(__AARCH64EL__) || defined(_M_ARM64)) /* arm64, but ... */ \
&& !defined(_WIN32))) /* not on windows */ \
&& !defined(COMPONENT_BUILD) /* no component build */
# define V8_HAS_ATTRIBUTE_PRESERVE_MOST (__has_attribute(preserve_most))
#endif
# define V8_HAS_ATTRIBUTE_VISIBILITY (__has_attribute(visibility))
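
How a feature-test macro like this is typically consumed (assumed pattern; the actual V8_PRESERVE_MOST definition sits outside this hunk):

#ifdef V8_HAS_ATTRIBUTE_PRESERVE_MOST
#define V8_PRESERVE_MOST __attribute__((preserve_most))
#else
#define V8_PRESERVE_MOST /* not supported */
#endif

// preserve_most makes the callee save nearly all registers, so annotating a
// rarely-taken out-of-line helper keeps the fast path's register allocation
// undisturbed:
void V8_PRESERVE_MOST WriteBarrierSlowPath(const void* slot);

That property is why CombinedWriteBarrierSlow in the write-barrier hunks earlier carries V8_PRESERVE_MOST, and why the comment above is careful about where the attribute is safe to enable.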

View File

@ -48,14 +48,13 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
for (unsigned int i = 0; i < images_count; ++i) {
const mach_header* header = _dyld_get_image_header(i);
if (header == nullptr) continue;
unsigned long size;
#if V8_HOST_ARCH_I32
unsigned int size;
char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
uint8_t* code_ptr = getsectiondata(header, SEG_TEXT, SECT_TEXT, &size);
#else
uint64_t size;
char* code_ptr = getsectdatafromheader_64(
reinterpret_cast<const mach_header_64*>(header), SEG_TEXT, SECT_TEXT,
&size);
const mach_header_64* header64 =
reinterpret_cast<const mach_header_64*>(header);
uint8_t* code_ptr = getsectiondata(header64, SEG_TEXT, SECT_TEXT, &size);
#endif
if (code_ptr == nullptr) continue;
const intptr_t slide = _dyld_get_image_vmaddr_slide(i);
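
The replacement call in one place, as a hedged sketch: getsectiondata() takes the in-memory image header and returns a pointer into the loaded __TEXT,__text section, writing the section size through its out-parameter, so a single entry point serves where the old code needed getsectdatafromheader and getsectdatafromheader_64:

#include <mach-o/dyld.h>
#include <mach-o/getsect.h>
#include <mach-o/loader.h>

// Mirrors the 64-bit branch above; bookkeeping and the 32-bit path omitted.
static const uint8_t* TextSectionOfImage(unsigned int i, unsigned long* size) {
  const mach_header* header = _dyld_get_image_header(i);
  if (header == nullptr) return nullptr;
  const mach_header_64* header64 =
      reinterpret_cast<const mach_header_64*>(header);
  return getsectiondata(header64, SEG_TEXT, SECT_TEXT, size);
}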

View File

@ -81,13 +81,13 @@ class SmallVector {
begin_ = other.begin_;
end_ = other.end_;
end_of_storage_ = other.end_of_storage_;
other.reset_to_inline_storage();
} else {
DCHECK_GE(capacity(), other.size()); // Sanity check.
size_t other_size = other.size();
memcpy(begin_, other.begin_, sizeof(T) * other_size);
end_ = begin_ + other_size;
}
other.reset_to_inline_storage();
return *this;
}
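
The fix here moves other.reset_to_inline_storage() out of the heap-stealing branch: before this change, a moved-from SmallVector that was using inline storage kept its contents instead of being emptied. A simplified standalone model of the corrected shape (not the V8 class):

#include <cstddef>
#include <cstring>

template <typename T, size_t kInline>
struct TinyVector {
  T inline_buf_[kInline];
  T* begin_ = inline_buf_;
  T* end_ = inline_buf_;
  T* end_of_storage_ = inline_buf_ + kInline;

  bool is_big() const { return begin_ != inline_buf_; }
  size_t size() const { return end_ - begin_; }
  void reset_to_inline_storage() {
    begin_ = inline_buf_;
    end_ = inline_buf_;
    end_of_storage_ = inline_buf_ + kInline;
  }

  TinyVector& operator=(TinyVector&& other) {
    if (other.is_big()) {
      // Steal the heap buffer.
      begin_ = other.begin_;
      end_ = other.end_;
      end_of_storage_ = other.end_of_storage_;
    } else {
      // Copy the inline elements (assumes trivially copyable T, as the
      // memcpy in the original does).
      memcpy(begin_, other.begin_, sizeof(T) * other.size());
      end_ = begin_ + other.size();
    }
    other.reset_to_inline_storage();  // now unconditional: the fix
    return *this;
  }
};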

View File

@ -309,8 +309,8 @@ void BaselineAssembler::Pop(T... registers) {
detail::PopAllHelper<T...>::Pop(this, registers...);
}
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
void BaselineAssembler::LoadTaggedField(Register output, Register source,
int offset) {
__ ldr(output, FieldMemOperand(source, offset));
}
@ -326,11 +326,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
SmiUntag(output);
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ ldr(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ ldrh(output, FieldMemOperand(source, offset));
@ -372,8 +367,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
Label* on_result,
Label::Distance) {
Label fallthrough;
LoadTaggedPointerField(scratch_and_result, feedback_vector,
FeedbackVector::OffsetOfElementAt(slot.ToInt()));
LoadTaggedField(scratch_and_result, feedback_vector,
FeedbackVector::OffsetOfElementAt(slot.ToInt()));
__ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
// Is it marked_for_deoptimization? If yes, clear the slot.
@ -398,8 +393,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
LoadTaggedField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ ldr(interrupt_budget,
@ -421,8 +416,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
LoadTaggedField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ ldr(interrupt_budget,
@ -437,16 +432,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
LoadTaggedField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
@ -455,33 +450,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
LoadTaggedField(context, context, Context::kExtensionOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
@ -570,8 +561,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
__ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
__ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}
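
This is the rename that dominates the rest of the commit: every baseline-assembler backend collapses LoadTaggedPointerField and LoadTaggedAnyField into a single LoadTaggedField, matching the DecompressTaggedAnyField to DecompressTaggedField merge earlier in the page. A sketch of why one helper suffices, assuming V8's scheme where a decompressed Smi's upper bits are simply ignored by its consumers:

#include <cstdint>

// One re-basing load covers both cases: heap pointers need the cage base,
// and for Smis the added base lands in upper bits that nothing reads.
uintptr_t LoadTaggedField(uintptr_t cage_base, const uint32_t* field) {
  return cage_base + *field;
}

On arm (this file) the two helpers were already the same plain ldr, so the merge there is purely a naming cleanup.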

View File

@ -369,9 +369,9 @@ void BaselineAssembler::Pop(T... registers) {
detail::PopAllHelper<T...>::Pop(this, registers...);
}
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
__ LoadTaggedPointerField(output, FieldMemOperand(source, offset));
void BaselineAssembler::LoadTaggedField(Register output, Register source,
int offset) {
__ LoadTaggedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
@ -386,11 +386,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
SmiUntag(output);
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ LoadAnyTaggedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ Ldrh(output, FieldMemOperand(source, offset));
@ -440,8 +435,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
LoadTaggedField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch().W();
__ Ldr(interrupt_budget,
@ -463,8 +458,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
LoadTaggedField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch().W();
__ Ldr(interrupt_budget,
@ -479,16 +474,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
LoadTaggedField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
@ -497,33 +492,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
LoadTaggedField(context, context, Context::kExtensionOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
@ -571,7 +562,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
{
const int instruction_count =
num_labels * instructions_per_label + instructions_per_jump_target;
TurboAssembler::BlockPoolsScope block_pools(masm_,
MacroAssembler::BlockPoolsScope block_pools(masm_,
instruction_count * kInstrSize);
__ Bind(&table);
for (int i = 0; i < num_labels; ++i) {
@ -630,7 +621,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
__ masm()->DropArguments(params_size, TurboAssembler::kCountIncludesReceiver);
__ masm()->DropArguments(params_size, MacroAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}

View File

@ -114,13 +114,12 @@ void BaselineAssembler::SmiUntag(Register output, Register value) {
void BaselineAssembler::LoadFixedArrayElement(Register output, Register array,
int32_t index) {
LoadTaggedAnyField(output, array,
FixedArray::kHeaderSize + index * kTaggedSize);
LoadTaggedField(output, array, FixedArray::kHeaderSize + index * kTaggedSize);
}
void BaselineAssembler::LoadPrototype(Register prototype, Register object) {
__ LoadMap(prototype, object);
LoadTaggedPointerField(prototype, prototype, Map::kPrototypeOffset);
LoadTaggedField(prototype, prototype, Map::kPrototypeOffset);
}
void BaselineAssembler::LoadContext(Register output) {
LoadRegister(output, interpreter::Register::current_context());

View File

@ -147,13 +147,11 @@ class BaselineAssembler {
inline void TailCallBuiltin(Builtin builtin);
inline void CallRuntime(Runtime::FunctionId function, int nargs);
inline void LoadTaggedPointerField(Register output, Register source,
int offset);
inline void LoadTaggedField(Register output, Register source, int offset);
inline void LoadTaggedSignedField(Register output, Register source,
int offset);
inline void LoadTaggedSignedFieldAndUntag(Register output, Register source,
int offset);
inline void LoadTaggedAnyField(Register output, Register source, int offset);
inline void LoadWord16FieldZeroExtend(Register output, Register source,
int offset);
inline void LoadWord8Field(Register output, Register source, int offset);
@ -170,16 +168,12 @@ class BaselineAssembler {
// X64 supports complex addressing mode, pointer decompression can be done by
// [%compressed_base + %r1 + K].
#if V8_TARGET_ARCH_X64
inline void LoadTaggedPointerField(TaggedRegister output, Register source,
int offset);
inline void LoadTaggedPointerField(TaggedRegister output,
TaggedRegister source, int offset);
inline void LoadTaggedPointerField(Register output, TaggedRegister source,
int offset);
inline void LoadTaggedAnyField(Register output, TaggedRegister source,
int offset);
inline void LoadTaggedAnyField(TaggedRegister output, TaggedRegister source,
int offset);
inline void LoadTaggedField(TaggedRegister output, Register source,
int offset);
inline void LoadTaggedField(TaggedRegister output, TaggedRegister source,
int offset);
inline void LoadTaggedField(Register output, TaggedRegister source,
int offset);
inline void LoadFixedArrayElement(Register output, TaggedRegister array,
int32_t index);
inline void LoadFixedArrayElement(TaggedRegister output, TaggedRegister array,

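On the x64-only overloads above: the [%compressed_base + %r1 + K] remark means decompression (cage base plus the zero-extended 32-bit value) and the field offset K fold into one effective-address computation, so a TaggedRegister can stay in compressed form between loads. A hedged model of the arithmetic:

#include <cstdint>
#include <cstring>

uint64_t LoadViaCompressed(uint64_t cage_base, uint32_t compressed, int k) {
  uint64_t result;
  // On x64 this compiles down to a single mov with a complex memory operand.
  std::memcpy(&result,
              reinterpret_cast<const void*>(cage_base + compressed + k),
              sizeof(result));
  return result;
}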
View File

@ -439,8 +439,8 @@ void BaselineCompiler::LoadFeedbackVector(Register output) {
void BaselineCompiler::LoadClosureFeedbackArray(Register output) {
LoadFeedbackVector(output);
__ LoadTaggedPointerField(output, output,
FeedbackVector::kClosureFeedbackCellArrayOffset);
__ LoadTaggedField(output, output,
FeedbackVector::kClosureFeedbackCellArrayOffset);
}
void BaselineCompiler::SelectBooleanConstant(
@ -754,8 +754,8 @@ void BaselineCompiler::VisitLdaCurrentContextSlot() {
BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
Register context = scratch_scope.AcquireScratch();
__ LoadContext(context);
__ LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(Index(0)));
__ LoadTaggedField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(Index(0)));
}
void BaselineCompiler::VisitLdaImmutableCurrentContextSlot() {
@ -1350,9 +1350,9 @@ void BaselineCompiler::VisitIntrinsicCreateJSGeneratorObject(
void BaselineCompiler::VisitIntrinsicGeneratorGetResumeMode(
interpreter::RegisterList args) {
__ LoadRegister(kInterpreterAccumulatorRegister, args[0]);
__ LoadTaggedAnyField(kInterpreterAccumulatorRegister,
kInterpreterAccumulatorRegister,
JSGeneratorObject::kResumeModeOffset);
__ LoadTaggedField(kInterpreterAccumulatorRegister,
kInterpreterAccumulatorRegister,
JSGeneratorObject::kResumeModeOffset);
}
void BaselineCompiler::VisitIntrinsicGeneratorClose(
@ -2211,8 +2211,8 @@ void BaselineCompiler::VisitSwitchOnGeneratorState() {
Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
Register context = scratch_scope.AcquireScratch();
__ LoadTaggedAnyField(context, generator_object,
JSGeneratorObject::kContextOffset);
__ LoadTaggedField(context, generator_object,
JSGeneratorObject::kContextOffset);
__ StoreContext(context);
interpreter::JumpTableTargetOffsets offsets =

View File

@ -293,8 +293,8 @@ void BaselineAssembler::Pop(T... registers) {
(__ Pop(registers), ...);
}
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
void BaselineAssembler::LoadTaggedField(Register output, Register source,
int offset) {
__ mov(output, FieldOperand(source, offset));
}
@ -310,11 +310,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
SmiUntag(output);
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ mov(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ movzx_w(output, FieldOperand(source, offset));
@ -354,8 +349,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
Label* on_result,
Label::Distance distance) {
Label fallthrough;
LoadTaggedPointerField(scratch_and_result, feedback_vector,
FeedbackVector::OffsetOfElementAt(slot.ToInt()));
LoadTaggedField(scratch_and_result, feedback_vector,
FeedbackVector::OffsetOfElementAt(slot.ToInt()));
__ LoadWeakValue(scratch_and_result, &fallthrough);
// Is it marked_for_deoptimization? If yes, clear the slot.
@ -378,8 +373,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
LoadTaggedField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
__ add(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
Immediate(weight));
if (skip_interrupt_label) {
@ -395,8 +390,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
Register feedback_cell = scratch_scope.AcquireScratch();
DCHECK(!AreAliased(feedback_cell, weight));
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
LoadTaggedField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
__ add(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
weight);
if (skip_interrupt_label) __ j(greater_equal, skip_interrupt_label);
@ -405,16 +400,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
LoadTaggedField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
@ -423,33 +418,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
LoadTaggedField(context, context, Context::kExtensionOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
@ -539,8 +530,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
// Drop receiver + arguments.
__ masm()->DropArguments(params_size, scratch,
TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}

View File

@ -296,8 +296,8 @@ void BaselineAssembler::Pop(T... registers) {
detail::PopAllHelper<T...>::Pop(this, registers...);
}
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
void BaselineAssembler::LoadTaggedField(Register output, Register source,
int offset) {
__ Ld_d(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
@ -310,10 +310,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
LoadTaggedSignedField(output, source, offset);
SmiUntag(output);
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ Ld_d(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ Ld_hu(output, FieldMemOperand(source, offset));
@ -350,8 +346,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
Label* on_result,
Label::Distance) {
Label fallthrough;
LoadTaggedPointerField(scratch_and_result, feedback_vector,
FeedbackVector::OffsetOfElementAt(slot.ToInt()));
LoadTaggedField(scratch_and_result, feedback_vector,
FeedbackVector::OffsetOfElementAt(slot.ToInt()));
__ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
// Is it marked_for_deoptimization? If yes, clear the slot.
{
@ -374,8 +370,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
LoadTaggedField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ Ld_w(interrupt_budget,
@ -394,8 +390,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
LoadTaggedField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ Ld_w(interrupt_budget,
@ -410,16 +406,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
LoadTaggedField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
@ -428,33 +424,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
LoadTaggedField(context, context, Context::kExtensionOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
@ -533,8 +525,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
__ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
__ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}

View File

@ -304,8 +304,8 @@ void BaselineAssembler::Pop(T... registers) {
detail::PopAllHelper<T...>::Pop(this, registers...);
}
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
void BaselineAssembler::LoadTaggedField(Register output, Register source,
int offset) {
__ Ld(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
@ -318,10 +318,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
LoadTaggedSignedField(output, source, offset);
SmiUntag(output);
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ Ld(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ Lhu(output, FieldMemOperand(source, offset));
@ -360,8 +356,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
Label* on_result,
Label::Distance) {
Label fallthrough;
LoadTaggedPointerField(scratch_and_result, feedback_vector,
FeedbackVector::OffsetOfElementAt(slot.ToInt()));
LoadTaggedField(scratch_and_result, feedback_vector,
FeedbackVector::OffsetOfElementAt(slot.ToInt()));
__ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
// Is it marked_for_deoptimization? If yes, clear the slot.
{
@ -384,8 +380,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
LoadTaggedField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ Lw(interrupt_budget,
@ -404,8 +400,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
LoadTaggedField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ Lw(interrupt_budget,
@ -420,16 +416,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
LoadTaggedField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
@ -438,33 +434,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
LoadTaggedField(context, context, Context::kExtensionOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
@ -544,8 +536,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
__ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
__ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}

View File

@ -49,31 +49,6 @@ class BaselineAssembler::ScratchRegisterScope {
int registers_used_;
};
inline bool IsSignedCondition(Condition cond) {
switch (cond) {
case kEqual:
case kNotEqual:
case kLessThan:
case kGreaterThan:
case kLessThanEqual:
case kGreaterThanEqual:
case kOverflow:
case kNoOverflow:
case kZero:
case kNotZero:
return true;
case kUnsignedLessThan:
case kUnsignedGreaterThan:
case kUnsignedLessThanEqual:
case kUnsignedGreaterThanEqual:
return false;
default:
UNREACHABLE();
}
}
#define __ assm->
// ppc helper
template <int width = 64>
@ -82,19 +57,19 @@ static void JumpIfHelper(MacroAssembler* assm, Condition cc, Register lhs,
static_assert(width == 64 || width == 32,
"only support 64 and 32 bit compare");
if (width == 64) {
if (IsSignedCondition(cc)) {
if (is_signed(cc)) {
__ CmpS64(lhs, rhs);
} else {
__ CmpU64(lhs, rhs);
}
} else {
if (IsSignedCondition(cc)) {
if (is_signed(cc)) {
__ CmpS32(lhs, rhs);
} else {
__ CmpU32(lhs, rhs);
}
}
__ b(check_condition(cc), target);
__ b(to_condition(cc), target);
}
#undef __
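
The deleted IsSignedCondition() (its s390 copy at the end of this page goes the same way) is replaced by shared is_signed() and to_condition() helpers. One possible definition with the removed switch's truth table, enum values assumed for the sketch (the real shared helper lives in the platform-independent assembler headers):

enum Condition {
  kEqual, kNotEqual, kLessThan, kGreaterThan, kLessThanEqual,
  kGreaterThanEqual, kOverflow, kNoOverflow, kZero, kNotZero,
  kUnsignedLessThan, kUnsignedGreaterThan, kUnsignedLessThanEqual,
  kUnsignedGreaterThanEqual,
};

inline bool is_signed(Condition cond) {
  switch (cond) {
    case kUnsignedLessThan:
    case kUnsignedGreaterThan:
    case kUnsignedLessThanEqual:
    case kUnsignedGreaterThanEqual:
      return false;
    default:
      return true;  // equality, signed compares, overflow and zero tests
  }
}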
@ -160,18 +135,18 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
Label* target, Label::Distance) {
ASM_CODE_COMMENT(masm_);
__ AndU64(r0, value, Operand(mask), ip, SetRC);
__ b(check_condition(cc), target, cr0);
__ b(to_condition(cc), target, cr0);
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance) {
ASM_CODE_COMMENT(masm_);
if (IsSignedCondition(cc)) {
if (is_signed(cc)) {
__ CmpS64(lhs, rhs, r0);
} else {
__ CmpU64(lhs, rhs, r0);
}
__ b(check_condition(cc), target);
__ b(to_condition(cc), target);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
@ -231,7 +206,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
Label::Distance) {
ASM_CODE_COMMENT(masm_);
__ LoadTaggedPointerField(ip, operand, r0);
__ LoadTaggedField(ip, operand, r0);
JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, value, ip, target);
}
@ -239,7 +214,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
Label::Distance) {
ASM_CODE_COMMENT(masm_);
__ LoadTaggedPointerField(ip, operand, r0);
__ LoadTaggedField(ip, operand, r0);
JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, value, ip, target);
}
@ -399,10 +374,10 @@ void BaselineAssembler::Pop(T... registers) {
detail::PopAllHelper<T...>::Pop(this, registers...);
}
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
void BaselineAssembler::LoadTaggedField(Register output, Register source,
int offset) {
ASM_CODE_COMMENT(masm_);
__ LoadTaggedPointerField(output, FieldMemOperand(source, offset), r0);
__ LoadTaggedField(output, FieldMemOperand(source, offset), r0);
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
@ -418,12 +393,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
SmiUntag(output);
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
ASM_CODE_COMMENT(masm_);
__ LoadAnyTaggedField(output, FieldMemOperand(source, offset), r0);
}
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
ASM_CODE_COMMENT(masm_);
@ -468,8 +437,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
Label* on_result,
Label::Distance) {
Label fallthrough;
LoadTaggedPointerField(scratch_and_result, feedback_vector,
FeedbackVector::OffsetOfElementAt(slot.ToInt()));
LoadTaggedField(scratch_and_result, feedback_vector,
FeedbackVector::OffsetOfElementAt(slot.ToInt()));
__ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
// Is it marked_for_deoptimization? If yes, clear the slot.
@ -494,8 +463,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
LoadTaggedField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ LoadU32(
@ -519,8 +488,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
LoadTaggedField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ LoadU32(
@ -538,17 +507,17 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
ASM_CODE_COMMENT(masm_);
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
LoadTaggedField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
ASM_CODE_COMMENT(masm_);
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
@ -558,34 +527,30 @@ void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
ASM_CODE_COMMENT(masm_);
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
ASM_CODE_COMMENT(masm_);
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
LoadTaggedField(context, context, Context::kExtensionOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
@ -684,8 +649,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
__ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
__ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}

View File

@ -297,9 +297,9 @@ void BaselineAssembler::Pop(T... registers) {
detail::PopAllHelper<T...>::Pop(this, registers...);
}
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
__ LoadTaggedPointerField(output, FieldMemOperand(source, offset));
void BaselineAssembler::LoadTaggedField(Register output, Register source,
int offset) {
__ LoadTaggedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
int offset) {
@ -311,10 +311,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
LoadTaggedSignedField(output, source, offset);
SmiUntag(output);
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ LoadAnyTaggedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ Lhu(output, FieldMemOperand(source, offset));
@ -351,8 +347,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
Label* on_result,
Label::Distance) {
Label fallthrough, clear_slot;
LoadTaggedPointerField(scratch_and_result, feedback_vector,
FeedbackVector::OffsetOfElementAt(slot.ToInt()));
LoadTaggedField(scratch_and_result, feedback_vector,
FeedbackVector::OffsetOfElementAt(slot.ToInt()));
__ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
// Is it marked_for_deoptimization? If yes, clear the slot.
@ -379,8 +375,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
LoadTaggedField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ Lw(interrupt_budget,
@ -401,8 +397,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
LoadTaggedField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ Lw(interrupt_budget,
@ -419,16 +415,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
LoadTaggedField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
@ -437,33 +433,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
LoadTaggedField(context, context, Context::kExtensionOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
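
The (cell_index - 1) and (-cell_index - 1) arithmetic repeated through these module-variable hunks encodes which backing FixedArray a cell lives in: positive indices name regular exports, negative ones regular imports, each biased by one. In plain form (helper names invented for illustration):

// Hypothetical helpers mirroring the index comments in the hunks above.
inline int RegularExportArrayIndex(int cell_index) {  // cell_index > 0
  return cell_index - 1;
}
inline int RegularImportArrayIndex(int cell_index) {  // cell_index < 0
  return -cell_index - 1;
}
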
@ -508,7 +500,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
__ CalcScaledAddress(t6, t6, reg, entry_size_log2);
__ Jump(t6);
{
TurboAssembler::BlockTrampolinePoolScope(masm());
MacroAssembler::BlockTrampolinePoolScope(masm());
__ BlockTrampolinePoolFor(num_labels * kInstrSize * 2);
__ bind(&table);
for (int i = 0; i < num_labels; ++i) {
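
The Switch lowering above wraps its jump table in a BlockTrampolinePoolScope because RISC-V-style assemblers may emit a trampoline pool between any two instructions, which would break the table's fixed entry_size_log2 spacing. The guard is RAII along these lines (sketch of the shape only, not V8's actual class):

// Illustrative-only shape of the trampoline-pool blocker.
class AssemblerSketch {
 public:
  void StartBlockTrampolinePool() { ++trampoline_block_depth_; }
  void EndBlockTrampolinePool() { --trampoline_block_depth_; }

  class BlockTrampolinePoolScope {
   public:
    explicit BlockTrampolinePoolScope(AssemblerSketch* assm) : assm_(assm) {
      assm_->StartBlockTrampolinePool();
    }
    ~BlockTrampolinePoolScope() { assm_->EndBlockTrampolinePool(); }

   private:
    AssemblerSketch* assm_;
  };

 private:
  int trampoline_block_depth_ = 0;
};

Note that a bare statement like MacroAssembler::BlockTrampolinePoolScope(masm()); constructs an unnamed temporary that is destroyed at the end of that statement, so a named local is what actually keeps the pool blocked across the table emission.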

@ -48,31 +48,6 @@ class BaselineAssembler::ScratchRegisterScope {
int registers_used_;
};
inline bool IsSignedCondition(Condition cond) {
switch (cond) {
case kEqual:
case kNotEqual:
case kLessThan:
case kGreaterThan:
case kLessThanEqual:
case kGreaterThanEqual:
case kOverflow:
case kNoOverflow:
case kZero:
case kNotZero:
return true;
case kUnsignedLessThan:
case kUnsignedGreaterThan:
case kUnsignedLessThanEqual:
case kUnsignedGreaterThanEqual:
return false;
default:
UNREACHABLE();
}
}
#define __ assm->
// s390x helper
template <int width = 64>
@ -81,19 +56,19 @@ static void JumpIfHelper(MacroAssembler* assm, Condition cc, Register lhs,
static_assert(width == 64 || width == 32,
"only support 64 and 32 bit compare");
if (width == 64) {
if (IsSignedCondition(cc)) {
if (is_signed(cc)) {
__ CmpS64(lhs, rhs);
} else {
__ CmpU64(lhs, rhs);
}
} else {
if (IsSignedCondition(cc)) {
if (is_signed(cc)) {
__ CmpS32(lhs, rhs);
} else {
__ CmpU32(lhs, rhs);
}
}
__ b(check_condition(cc), target);
__ b(to_condition(cc), target);
}
#undef __
@ -159,18 +134,18 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
Label* target, Label::Distance) {
ASM_CODE_COMMENT(masm_);
__ AndP(r0, value, Operand(mask));
__ b(check_condition(cc), target);
__ b(to_condition(cc), target);
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance) {
ASM_CODE_COMMENT(masm_);
if (IsSignedCondition(cc)) {
if (is_signed(cc)) {
__ CmpS64(lhs, rhs);
} else {
__ CmpU64(lhs, rhs);
}
__ b(check_condition(cc), target);
__ b(to_condition(cc), target);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
@ -236,9 +211,9 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
if (COMPRESS_POINTERS_BOOL) {
MemOperand addr =
MemOperand(operand.rx(), operand.rb(), operand.offset() + stack_bias);
__ LoadTaggedPointerField(ip, addr, r0);
__ LoadTaggedField(ip, addr, r0);
} else {
__ LoadTaggedPointerField(ip, operand, r0);
__ LoadTaggedField(ip, operand, r0);
}
JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, value, ip, target);
}
@ -251,9 +226,9 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
if (COMPRESS_POINTERS_BOOL) {
MemOperand addr =
MemOperand(operand.rx(), operand.rb(), operand.offset() + stack_bias);
__ LoadTaggedPointerField(ip, addr, r0);
__ LoadTaggedField(ip, addr, r0);
} else {
__ LoadTaggedPointerField(ip, operand, r0);
__ LoadTaggedField(ip, operand, r0);
}
JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, ip, value, target);
}
@ -412,10 +387,10 @@ void BaselineAssembler::Pop(T... registers) {
detail::PopAllHelper<T...>::Pop(this, registers...);
}
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
void BaselineAssembler::LoadTaggedField(Register output, Register source,
int offset) {
ASM_CODE_COMMENT(masm_);
__ LoadTaggedPointerField(output, FieldMemOperand(source, offset), r0);
__ LoadTaggedField(output, FieldMemOperand(source, offset), r0);
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
@ -431,12 +406,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
SmiUntag(output);
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
ASM_CODE_COMMENT(masm_);
__ LoadAnyTaggedField(output, FieldMemOperand(source, offset), r0);
}
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
ASM_CODE_COMMENT(masm_);
@ -481,8 +450,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
Label* on_result,
Label::Distance) {
Label fallthrough;
LoadTaggedPointerField(scratch_and_result, feedback_vector,
FeedbackVector::OffsetOfElementAt(slot.ToInt()));
LoadTaggedField(scratch_and_result, feedback_vector,
FeedbackVector::OffsetOfElementAt(slot.ToInt()));
__ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
// Is it marked_for_deoptimization? If yes, clear the slot.
@ -507,8 +476,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
LoadTaggedField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ LoadU32(
@ -532,8 +501,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
LoadTaggedField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ LoadU32(
@ -550,16 +519,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
LoadTaggedField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
@ -568,33 +537,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
LoadTaggedField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
LoadTaggedField(context, context, Context::kExtensionOffset);
LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
@ -692,8 +657,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
__ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
__ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}
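
The s390 port drops its private IsSignedCondition switch in favor of the shared is_signed and to_condition helpers. The predicate's behavior can be reconstructed from the deleted switch above (stand-in enum; the real helper lives with V8's shared condition definitions):

// Sketch matching the deleted helper: only the four explicitly unsigned
// comparisons are unsigned, everything else compares signed.
enum Condition {
  kEqual, kNotEqual, kLessThan, kGreaterThan, kLessThanEqual,
  kGreaterThanEqual, kOverflow, kNoOverflow, kZero, kNotZero,
  kUnsignedLessThan, kUnsignedGreaterThan, kUnsignedLessThanEqual,
  kUnsignedGreaterThanEqual
};

inline bool is_signed(Condition cond) {
  switch (cond) {
    case kUnsignedLessThan:
    case kUnsignedGreaterThan:
    case kUnsignedLessThanEqual:
    case kUnsignedGreaterThanEqual:
      return false;
    default:
      return true;
  }
}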

@ -287,9 +287,9 @@ void BaselineAssembler::Pop(T... registers) {
(__ Pop(registers), ...);
}
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
__ LoadTaggedPointerField(output, FieldOperand(source, offset));
void BaselineAssembler::LoadTaggedField(Register output, Register source,
int offset) {
__ LoadTaggedField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
int offset) {
@ -300,10 +300,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
int offset) {
__ SmiUntagField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ LoadAnyTaggedField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ movzxwq(output, FieldOperand(source, offset));
@ -331,45 +327,31 @@ void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
__ StoreTaggedField(FieldOperand(target, offset), value);
}
void BaselineAssembler::LoadTaggedPointerField(TaggedRegister output,
Register source, int offset) {
__ LoadTaggedPointerField(output, FieldOperand(source, offset));
void BaselineAssembler::LoadTaggedField(TaggedRegister output, Register source,
int offset) {
__ LoadTaggedField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedPointerField(TaggedRegister output,
TaggedRegister source,
int offset) {
__ LoadTaggedPointerField(output, FieldOperand(source, offset));
void BaselineAssembler::LoadTaggedField(TaggedRegister output,
TaggedRegister source, int offset) {
__ LoadTaggedField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedPointerField(Register output,
TaggedRegister source,
int offset) {
__ LoadTaggedPointerField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedAnyField(Register output,
TaggedRegister source, int offset) {
__ LoadAnyTaggedField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedAnyField(TaggedRegister output,
TaggedRegister source, int offset) {
__ LoadAnyTaggedField(output, FieldOperand(source, offset));
void BaselineAssembler::LoadTaggedField(Register output, TaggedRegister source,
int offset) {
__ LoadTaggedField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadFixedArrayElement(Register output,
TaggedRegister array,
int32_t index) {
LoadTaggedAnyField(output, array,
FixedArray::kHeaderSize + index * kTaggedSize);
LoadTaggedField(output, array, FixedArray::kHeaderSize + index * kTaggedSize);
}
void BaselineAssembler::LoadFixedArrayElement(TaggedRegister output,
TaggedRegister array,
int32_t index) {
LoadTaggedAnyField(output, array,
FixedArray::kHeaderSize + index * kTaggedSize);
LoadTaggedField(output, array, FixedArray::kHeaderSize + index * kTaggedSize);
}
void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
@ -389,8 +371,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
LoadFunction(feedback_cell);
// Decompresses pointer by complex addressing mode when necessary.
TaggedRegister tagged(feedback_cell);
LoadTaggedPointerField(tagged, feedback_cell,
JSFunction::kFeedbackCellOffset);
LoadTaggedField(tagged, feedback_cell, JSFunction::kFeedbackCellOffset);
__ addl(FieldOperand(tagged, FeedbackCell::kInterruptBudgetOffset),
Immediate(weight));
if (skip_interrupt_label) {
@ -407,8 +388,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
LoadFunction(feedback_cell);
// Decompresses pointer by complex addressing mode when necessary.
TaggedRegister tagged(feedback_cell);
LoadTaggedPointerField(tagged, feedback_cell,
JSFunction::kFeedbackCellOffset);
LoadTaggedField(tagged, feedback_cell, JSFunction::kFeedbackCellOffset);
__ addl(FieldOperand(tagged, FeedbackCell::kInterruptBudgetOffset), weight);
if (skip_interrupt_label) __ j(greater_equal, skip_interrupt_label);
}
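
Both AddToInterruptBudgetAndJumpIfNotExceeded variants above implement the same check: bump the FeedbackCell's budget by a weight and skip the interrupt request while the result stays non-negative (the emitted j(greater_equal, ...)). As scalar code (field layout illustrative):

// Plain-C++ rendering of the budget check the assembler emits.
#include <cstdint>

struct FeedbackCell {
  int32_t interrupt_budget;  // stand-in for the kInterruptBudgetOffset field
};

// Returns true when the caller may skip the interrupt path.
inline bool AddToBudgetAndCheck(FeedbackCell* cell, int32_t weight) {
  cell->interrupt_budget += weight;
  return cell->interrupt_budget >= 0;
}
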
@ -420,17 +400,17 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
// addressing mode, any intermediate context pointer is loaded in compressed
// form.
if (depth == 0) {
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
LoadTaggedField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
} else {
TaggedRegister tagged(context);
LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
LoadTaggedField(tagged, context, Context::kPreviousOffset);
--depth;
for (; depth > 0; --depth) {
LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
LoadTaggedField(tagged, tagged, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, tagged,
Context::OffsetOfElementAt(index));
LoadTaggedField(kInterpreterAccumulatorRegister, tagged,
Context::OffsetOfElementAt(index));
}
}
@ -442,10 +422,10 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
// form.
if (depth > 0) {
TaggedRegister tagged(context);
LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
LoadTaggedField(tagged, context, Context::kPreviousOffset);
--depth;
for (; depth > 0; --depth) {
LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
LoadTaggedField(tagged, tagged, Context::kPreviousOffset);
}
if (COMPRESS_POINTERS_BOOL) {
// Decompress tagged pointer.
@ -463,29 +443,26 @@ void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
// enabled, any intermediate context pointer is loaded in compressed form.
TaggedRegister tagged(context);
if (depth == 0) {
LoadTaggedPointerField(tagged, context, Context::kExtensionOffset);
LoadTaggedField(tagged, context, Context::kExtensionOffset);
} else {
LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
LoadTaggedField(tagged, context, Context::kPreviousOffset);
--depth;
for (; depth > 0; --depth) {
LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
LoadTaggedField(tagged, tagged, Context::kPreviousOffset);
}
LoadTaggedPointerField(tagged, tagged, Context::kExtensionOffset);
LoadTaggedField(tagged, tagged, Context::kExtensionOffset);
}
if (cell_index > 0) {
LoadTaggedPointerField(tagged, tagged,
SourceTextModule::kRegularExportsOffset);
LoadTaggedField(tagged, tagged, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(tagged, tagged,
SourceTextModule::kRegularImportsOffset);
LoadTaggedField(tagged, tagged, SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(tagged, tagged, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, tagged,
Cell::kValueOffset);
LoadTaggedField(kInterpreterAccumulatorRegister, tagged, Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
@ -495,17 +472,16 @@ void BaselineAssembler::StaModuleVariable(Register context, Register value,
// enabled, any intermediate context pointer is loaded in compressed form.
TaggedRegister tagged(context);
if (depth == 0) {
LoadTaggedPointerField(tagged, context, Context::kExtensionOffset);
LoadTaggedField(tagged, context, Context::kExtensionOffset);
} else {
LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
LoadTaggedField(tagged, context, Context::kPreviousOffset);
--depth;
for (; depth > 0; --depth) {
LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
LoadTaggedField(tagged, tagged, Context::kPreviousOffset);
}
LoadTaggedPointerField(tagged, tagged, Context::kExtensionOffset);
LoadTaggedField(tagged, tagged, Context::kExtensionOffset);
}
LoadTaggedPointerField(tagged, tagged,
SourceTextModule::kRegularExportsOffset);
LoadTaggedField(tagged, tagged, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
@ -587,8 +563,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
// Drop receiver + arguments.
__ masm()->DropArguments(params_size, scratch,
TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}
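
On x64 the unification also collapses the TaggedRegister overloads: one LoadTaggedField name now covers every mix of plain and tagged operands. TaggedRegister marks a register still holding a compressed value, which, per the comments above, lets FieldOperand decompress through a complex addressing mode. The consolidated overload set, roughly (stand-in types, bodies elided):

// Shape of the x64 overload set after the rename; illustrative only.
struct Register {};
struct TaggedRegister { Register reg; };  // value not yet decompressed

struct BaselineAssemblerSketch {
  void LoadTaggedField(Register output, Register source, int offset);
  void LoadTaggedField(TaggedRegister output, Register source, int offset);
  void LoadTaggedField(TaggedRegister output, TaggedRegister source,
                       int offset);
  void LoadTaggedField(Register output, TaggedRegister source, int offset);
};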

@ -130,8 +130,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
__ DropArguments(scratch, TurboAssembler::kCountIsSmi,
TurboAssembler::kCountIncludesReceiver);
__ DropArguments(scratch, MacroAssembler::kCountIsSmi,
MacroAssembler::kCountIncludesReceiver);
__ Jump(lr);
__ bind(&stack_overflow);
@ -278,8 +278,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
__ DropArguments(r1, TurboAssembler::kCountIsSmi,
TurboAssembler::kCountIncludesReceiver);
__ DropArguments(r1, MacroAssembler::kCountIsSmi,
MacroAssembler::kCountIncludesReceiver);
__ Jump(lr);
__ bind(&check_receiver);
@ -826,8 +826,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
__ DropArguments(params_size, TurboAssembler::kCountIsBytes,
TurboAssembler::kCountIncludesReceiver);
__ DropArguments(params_size, MacroAssembler::kCountIsBytes,
MacroAssembler::kCountIncludesReceiver);
}
// Advance the current bytecode offset. This simulates what all bytecode
@ -1352,7 +1352,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
__ sub(start_address, start_address, scratch);
// Push the arguments.
__ PushArray(start_address, num_args, scratch,
TurboAssembler::PushArrayOrder::kReverse);
MacroAssembler::PushArrayOrder::kReverse);
}
// static
@ -1820,8 +1820,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ ldr(r5, MemOperand(sp, kSystemPointerSize), ge); // thisArg
__ cmp(r0, Operand(JSParameterCount(2)), ge);
__ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argArray
__ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
__ DropArgumentsAndPushNewReceiver(r0, r5, MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@ -1897,8 +1897,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ ldr(r5, MemOperand(sp, 2 * kSystemPointerSize), ge); // thisArgument
__ cmp(r0, Operand(JSParameterCount(3)), ge);
__ ldr(r2, MemOperand(sp, 3 * kSystemPointerSize), ge); // argumentsList
__ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
__ DropArgumentsAndPushNewReceiver(r0, r5, MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@ -1940,8 +1940,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argumentsList
__ cmp(r0, Operand(JSParameterCount(3)), ge);
__ ldr(r3, MemOperand(sp, 3 * kSystemPointerSize), ge); // new.target
__ DropArgumentsAndPushNewReceiver(r0, r4, TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
__ DropArgumentsAndPushNewReceiver(r0, r4, MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
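
The predicated cmp/ldr pairs in these arm hunks pick up optional arguments only when argc is large enough, defaulting to undefined otherwise. The equivalent selection in plain code (hypothetical helper; JSParameterCount counts the receiver, hence the one-slot bias):

// What the conditional loads compute for thisArg/argArray and friends.
#include <cstdint>
using Object = uintptr_t;

inline Object OptionalArg(const Object* stack_args, int argc, int index,
                          Object undefined_value) {
  // stack_args[0] is the receiver; JS argument i sits at stack_args[i + 1].
  return argc > index ? stack_args[index + 1] : undefined_value;
}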

@ -163,7 +163,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
__ DropArguments(x1, TurboAssembler::kCountIncludesReceiver);
__ DropArguments(x1, MacroAssembler::kCountIncludesReceiver);
__ Ret();
__ Bind(&stack_overflow);
@ -213,7 +213,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[4*kSystemPointerSize]: context (pushed by FrameScope)
// -----------------------------------
__ LoadTaggedPointerField(
__ LoadTaggedField(
x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(w4);
@ -348,7 +348,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Leave construct frame.
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
__ DropArguments(x1, TurboAssembler::kCountIncludesReceiver);
__ DropArguments(x1, MacroAssembler::kCountIncludesReceiver);
__ Ret();
// Otherwise we do a smi check and fall through to check if the return value
@ -423,7 +423,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
}
__ Cmp(scratch1, INTERPRETER_DATA_TYPE);
__ B(ne, &done);
__ LoadTaggedPointerField(
__ LoadTaggedField(
sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
__ Bind(&done);
@ -446,10 +446,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ AssertGeneratorObject(x1);
// Load suspended function and context.
__ LoadTaggedPointerField(
x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedPointerField(cp,
FieldMemOperand(x4, JSFunction::kContextOffset));
__ LoadTaggedField(x4,
FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedField(cp, FieldMemOperand(x4, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@ -477,7 +476,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ B(lo, &stack_overflow);
// Get number of arguments for generator function.
__ LoadTaggedPointerField(
__ LoadTaggedField(
x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
__ Ldrh(w10, FieldMemOperand(
x10, SharedFunctionInfo::kFormalParameterCountOffset));
@ -493,8 +492,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Poke(padreg, Operand(x11, LSL, kSystemPointerSizeLog2));
// Poke receiver into highest claimed slot.
__ LoadTaggedPointerField(
x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
__ LoadTaggedField(x5,
FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
__ Poke(x5, __ ReceiverOperand(x10));
// ----------- S t a t e -------------
@ -507,7 +506,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -----------------------------------
// Copy the function arguments from the generator object's register file.
__ LoadTaggedPointerField(
__ LoadTaggedField(
x5,
FieldMemOperand(x1, JSGeneratorObject::kParametersAndRegistersOffset));
{
@ -518,7 +517,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Add(x5, x5, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ Bind(&loop);
__ Sub(x10, x10, 1);
__ LoadAnyTaggedField(x11, MemOperand(x5, -kTaggedSize, PreIndex));
__ LoadTaggedField(x11, MemOperand(x5, -kTaggedSize, PreIndex));
__ Str(x11, MemOperand(x12, -kSystemPointerSize, PostIndex));
__ Cbnz(x10, &loop);
__ Bind(&done);
@ -527,9 +526,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (v8_flags.debug_code) {
Label is_baseline;
__ LoadTaggedPointerField(
__ LoadTaggedField(
x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
__ LoadTaggedField(
x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecodeOrBaseline(masm, x3, x0, &is_baseline);
__ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
@ -539,7 +538,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
__ LoadTaggedPointerField(
__ LoadTaggedField(
x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
__ Ldrh(w0, FieldMemOperand(
x0, SharedFunctionInfo::kFormalParameterCountOffset));
@ -549,7 +548,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Mov(x3, x1);
__ Mov(x1, x4);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ LoadTaggedPointerField(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
__ LoadTaggedField(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
__ JumpCodeObject(x2);
}
@ -561,8 +560,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(x1, padreg, x4, x5);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(padreg, x1);
__ LoadTaggedPointerField(
x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedField(x4,
FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
}
__ B(&stepping_prepared);
@ -572,8 +571,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(x1, padreg);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(padreg, x1);
__ LoadTaggedPointerField(
x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedField(x4,
FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
}
__ B(&stepping_prepared);
@ -1108,11 +1107,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
BaselineOutOfLinePrologueDescriptor::kClosure);
// Load the feedback vector from the closure.
Register feedback_vector = temps.AcquireX();
__ LoadTaggedPointerField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ AssertFeedbackVector(feedback_vector, x4);
// Check the tiering state.
@ -1205,7 +1203,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
{
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
// Drop the frame created by the baseline call.
__ Pop<TurboAssembler::kAuthLR>(fp, lr);
__ Pop<MacroAssembler::kAuthLR>(fp, lr);
__ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
__ Trap();
}
@ -1270,9 +1268,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ LoadTaggedPointerField(
__ LoadTaggedField(
x4, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
__ LoadTaggedField(
kInterpreterBytecodeArrayRegister,
FieldMemOperand(x4, SharedFunctionInfo::kFunctionDataOffset));
@ -1288,17 +1286,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ B(ne, &compile_lazy);
// Load the feedback vector from the closure.
__ LoadTaggedPointerField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, setup the stack frame.
__ LoadTaggedPointerField(
x7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ LoadTaggedField(x7,
FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ Ldrh(x7, FieldMemOperand(x7, Map::kInstanceTypeOffset));
__ Cmp(x7, FEEDBACK_VECTOR_TYPE);
__ B(ne, &push_stack_frame);
@ -1330,7 +1327,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// the frame (that is done below).
__ Bind(&push_stack_frame);
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ Push<TurboAssembler::kSignLR>(lr, fp);
__ Push<MacroAssembler::kSignLR>(lr, fp);
__ mov(fp, sp);
__ Push(cp, closure);
@ -1342,7 +1339,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// Push actual argument count, bytecode array, Smi tagged bytecode array
// offset and an undefined (to properly align the stack pointer).
static_assert(TurboAssembler::kExtraSlotClaimedByPrologue == 1);
static_assert(MacroAssembler::kExtraSlotClaimedByPrologue == 1);
__ SmiTag(x6, kInterpreterBytecodeOffsetRegister);
__ Push(kJavaScriptCallArgCountRegister, kInterpreterBytecodeArrayRegister);
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
@ -1480,16 +1477,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ bind(&is_baseline);
{
// Load the feedback vector from the closure.
__ LoadTaggedPointerField(
__ LoadTaggedField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
__ LoadTaggedPointerField(
__ LoadTaggedField(
x7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ Ldrh(x7, FieldMemOperand(x7, Map::kInstanceTypeOffset));
__ Cmp(x7, FEEDBACK_VECTOR_TYPE);
@ -1582,7 +1579,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
}
__ CopyDoubleWords(stack_addr, last_arg_addr, slots_to_copy,
TurboAssembler::kDstLessThanSrcAndReverse);
MacroAssembler::kDstLessThanSrcAndReverse);
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// Store "undefined" as the receiver arg if we need to.
@ -1732,16 +1729,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
__ Ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ LoadTaggedPointerField(
__ LoadTaggedField(
x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
__ LoadTaggedField(
x1, FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
__ CompareObjectType(x1, kInterpreterDispatchTableRegister,
kInterpreterDispatchTableRegister,
INTERPRETER_DATA_TYPE);
__ B(ne, &builtin_trampoline);
__ LoadTaggedPointerField(
__ LoadTaggedField(
x1, FieldMemOperand(x1, InterpreterData::kInterpreterTrampolineOffset));
__ LoadCodeEntry(x1, x1);
__ B(&trampoline_loaded);
@ -1882,7 +1879,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// Restore fp, lr.
__ Mov(sp, fp);
__ Pop<TurboAssembler::kAuthLR>(fp, lr);
__ Pop<MacroAssembler::kAuthLR>(fp, lr);
__ LoadEntryFromBuiltinIndex(builtin);
__ Jump(builtin);
@ -1997,7 +1994,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadTaggedPointerField(
__ LoadTaggedField(
x1,
FieldMemOperand(
x0, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));
@ -2069,7 +2066,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Peek(arg_array, 2 * kSystemPointerSize);
__ bind(&done);
}
__ DropArguments(argc, TurboAssembler::kCountIncludesReceiver);
__ DropArguments(argc, MacroAssembler::kCountIncludesReceiver);
__ PushArgument(this_arg);
// ----------- S t a t e -------------
@ -2158,7 +2155,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ SlotAddress(copy_from, count);
__ Add(copy_to, copy_from, kSystemPointerSize);
__ CopyDoubleWords(copy_to, copy_from, count,
TurboAssembler::kSrcLessThanDst);
MacroAssembler::kSrcLessThanDst);
__ Drop(2);
}
@ -2206,7 +2203,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ Peek(arguments_list, 3 * kSystemPointerSize);
__ bind(&done);
}
__ DropArguments(argc, TurboAssembler::kCountIncludesReceiver);
__ DropArguments(argc, MacroAssembler::kCountIncludesReceiver);
__ PushArgument(this_argument);
// ----------- S t a t e -------------
@ -2264,7 +2261,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ bind(&done);
}
__ DropArguments(argc, TurboAssembler::kCountIncludesReceiver);
__ DropArguments(argc, MacroAssembler::kCountIncludesReceiver);
// Push receiver (undefined).
__ PushArgument(undefined_value);
@ -2348,7 +2345,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Allow x2 to be a FixedArray, or a FixedDoubleArray if x4 == 0.
Label ok, fail;
__ AssertNotSmi(x2, AbortReason::kOperandIsNotAFixedArray);
__ LoadTaggedPointerField(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
__ LoadTaggedField(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
__ Ldrh(x13, FieldMemOperand(x10, Map::kInstanceTypeOffset));
__ Cmp(x13, FIXED_ARRAY_TYPE);
__ B(eq, &ok);
@ -2394,7 +2391,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ Add(argc, argc, len); // Update new argc.
__ Bind(&loop);
__ Sub(len, len, 1);
__ LoadAnyTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex));
__ LoadTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex));
__ CmpTagged(scratch, the_hole_value);
__ Csel(scratch, scratch, undefined_value, ne);
__ Str(scratch, MemOperand(dst, kSystemPointerSize, PostIndex));
@ -2426,7 +2423,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(x3, &new_target_not_constructor);
__ LoadTaggedPointerField(x5, FieldMemOperand(x3, HeapObject::kMapOffset));
__ LoadTaggedField(x5, FieldMemOperand(x3, HeapObject::kMapOffset));
__ Ldrb(x5, FieldMemOperand(x5, Map::kBitFieldOffset));
__ TestAndBranchIfAnySet(x5, Map::Bits1::IsConstructorBit::kMask,
&new_target_constructor);
@ -2486,14 +2483,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -----------------------------------
__ AssertCallableFunction(x1);
__ LoadTaggedPointerField(
__ LoadTaggedField(
x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
__ LoadTaggedPointerField(cp,
FieldMemOperand(x1, JSFunction::kContextOffset));
__ LoadTaggedField(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kFlagsOffset));
@ -2545,7 +2541,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(cp, x1, x0, padreg);
__ SmiUntag(x0);
}
__ LoadTaggedPointerField(
__ LoadTaggedField(
x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Bind(&convert_receiver);
}
@ -2579,7 +2575,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Load [[BoundArguments]] into x2 and length of that into x4.
Label no_bound_arguments;
__ LoadTaggedPointerField(
__ LoadTaggedField(
bound_argv, FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
__ SmiUntagField(bound_argc,
FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
@ -2662,7 +2658,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ SlotAddress(copy_to, total_argc);
__ Sub(copy_from, copy_to, kSystemPointerSize);
__ CopyDoubleWords(copy_to, copy_from, argc,
TurboAssembler::kSrcLessThanDst);
MacroAssembler::kSrcLessThanDst);
}
}
@ -2681,8 +2677,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ SlotAddress(copy_to, 1);
__ Bind(&loop);
__ Sub(counter, counter, 1);
__ LoadAnyTaggedField(scratch,
MemOperand(bound_argv, kTaggedSize, PostIndex));
__ LoadTaggedField(scratch,
MemOperand(bound_argv, kTaggedSize, PostIndex));
__ Str(scratch, MemOperand(copy_to, kSystemPointerSize, PostIndex));
__ Cbnz(counter, &loop);
}
@ -2703,15 +2699,15 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(x1);
// Patch the receiver to [[BoundThis]].
__ LoadAnyTaggedField(x10,
FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
__ LoadTaggedField(x10,
FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
__ Poke(x10, __ ReceiverOperand(x0));
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
__ LoadTaggedPointerField(
__ LoadTaggedField(
x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
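
CallBoundFunctionImpl follows the bound-function call steps visible above: patch the receiver to [[BoundThis]], splice in [[BoundArguments]], then tail-call [[BoundTargetFunction]] through the generic Call builtin. A conceptual pseudo-C++ rendering over stand-in types (not how the generated code actually represents arguments):

// Conceptual equivalent of the builtin's stack manipulation.
#include <cstdint>
#include <vector>

using Object = uintptr_t;

struct JSBoundFunction {
  Object bound_this;
  std::vector<Object> bound_arguments;
  Object bound_target_function;
};

inline std::vector<Object> BuildBoundCallArgs(const JSBoundFunction& f,
                                              std::vector<Object> args) {
  args[0] = f.bound_this;  // receiver slot
  args.insert(args.begin() + 1, f.bound_arguments.begin(),
              f.bound_arguments.end());
  return args;  // then call f.bound_target_function with these
}
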
@ -2812,7 +2808,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
Label call_generic_stub;
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ LoadTaggedPointerField(
__ LoadTaggedField(
x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
__ TestAndBranchIfAllClear(
@ -2844,13 +2840,13 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
Label done;
__ CmpTagged(x1, x3);
__ B(ne, &done);
__ LoadTaggedPointerField(
__ LoadTaggedField(
x3, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
__ Bind(&done);
}
// Construct the [[BoundTargetFunction]] via the Construct builtin.
__ LoadTaggedPointerField(
__ LoadTaggedField(
x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
@ -2874,8 +2870,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ JumpIfSmi(target, &non_constructor);
// Check if target has a [[Construct]] internal method.
__ LoadTaggedPointerField(map,
FieldMemOperand(target, HeapObject::kMapOffset));
__ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset));
{
Register flags = x2;
DCHECK(!AreAliased(argc, target, map, instance_type, flags));
@ -2976,12 +2971,11 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
Register scratch = x10;
Label allocate_vector, done;
__ LoadTaggedPointerField(
__ LoadTaggedField(
vector, FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kFeedbackVectorsOffset));
__ Add(vector, vector, Operand(func_index, LSL, kTaggedSizeLog2));
__ LoadTaggedPointerField(vector,
FieldMemOperand(vector, FixedArray::kHeaderSize));
__ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize));
__ JumpIfSmi(vector, &allocate_vector);
__ bind(&done);
__ Push(vector, xzr);
@ -2996,7 +2990,7 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
// Save registers.
__ PushXRegList(kSavedGpRegs);
__ PushQRegList(kSavedFpRegs);
__ Push<TurboAssembler::kSignLR>(lr, xzr); // xzr is for alignment.
__ Push<MacroAssembler::kSignLR>(lr, xzr); // xzr is for alignment.
// Arguments to the runtime function: instance, func_index, and an
// additional stack slot for the NativeModule. The first pushed register
@ -3008,7 +3002,7 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
__ Mov(vector, kReturnRegister0);
// Restore registers and frame type.
__ Pop<TurboAssembler::kAuthLR>(xzr, lr);
__ Pop<MacroAssembler::kAuthLR>(xzr, lr);
__ PopQRegList(kSavedFpRegs);
__ PopXRegList(kSavedGpRegs);
// Restore the instance from the frame.
@ -3121,8 +3115,8 @@ void PrepareForBuiltinCall(MacroAssembler* masm, MemOperand GCScanSlotPlace,
MemOperand(sp, -2 * kSystemPointerSize, PreIndex));
// We had to prepare the parameters for the Call: we have to put the context
// into kContextRegister.
__ LoadAnyTaggedField(
kContextRegister, // cp(x27)
__ LoadTaggedField(
kContextRegister, // cp(x27)
MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
WasmInstanceObject::kNativeContextOffset)));
}
@ -3210,7 +3204,7 @@ void AllocateSuspender(MacroAssembler* masm, Register function_data,
MemOperand(fp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset));
__ Stp(wasm_instance, function_data,
MemOperand(sp, -2 * kSystemPointerSize, PreIndex));
__ LoadAnyTaggedField(
__ LoadTaggedField(
kContextRegister,
MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
WasmInstanceObject::kNativeContextOffset)));
@ -3256,15 +3250,14 @@ void ReloadParentContinuation(MacroAssembler* masm, Register wasm_instance,
wasm::JumpBuffer::Retired);
}
Register parent = tmp2;
__ LoadAnyTaggedField(
parent,
FieldMemOperand(active_continuation,
WasmContinuationObject::kParentOffset));
__ LoadTaggedField(parent,
FieldMemOperand(active_continuation,
WasmContinuationObject::kParentOffset));
// Update active continuation root.
int32_t active_continuation_offset =
TurboAssembler::RootRegisterOffsetForRootIndex(
RootIndex::kActiveContinuation);
MacroAssembler::RootRegisterOffsetForRootIndex(
RootIndex::kActiveContinuation);
__ Str(parent, MemOperand(kRootRegister, active_continuation_offset));
jmpbuf = parent;
__ LoadExternalPointerField(
@ -3293,7 +3286,7 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1,
FieldMemOperand(suspender, WasmSuspenderObject::kStateOffset);
__ Move(tmp2, Smi::FromInt(WasmSuspenderObject::kInactive));
__ StoreTaggedField(tmp2, state_loc);
__ LoadAnyTaggedField(
__ LoadTaggedField(
suspender,
FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
__ CompareRoot(suspender, RootIndex::kUndefinedValue);
@ -3313,8 +3306,8 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1,
__ StoreTaggedField(tmp2, state_loc);
__ bind(&undefined);
int32_t active_suspender_offset =
TurboAssembler::RootRegisterOffsetForRootIndex(
RootIndex::kActiveSuspender);
MacroAssembler::RootRegisterOffsetForRootIndex(
RootIndex::kActiveSuspender);
__ Str(suspender, MemOperand(kRootRegister, active_suspender_offset));
}
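
Roots such as the active continuation and active suspender live at fixed offsets from the dedicated root register, so each update in these stack-switching hunks is the single Str seen above. Illustrative shape (the index is a placeholder, not V8's layout):

// Stand-in for a root-table update through the pinned root register.
#include <cstdint>

extern uintptr_t* root_table;             // what kRootRegister points at
constexpr int kActiveSuspenderIndex = 7;  // placeholder index

inline void SetRoot(int index, uintptr_t value) {
  root_table[index] = value;  // the emitted Str(value, [kRootRegister, #off])
}
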
@ -3322,17 +3315,16 @@ void LoadFunctionDataAndWasmInstance(MacroAssembler* masm,
Register function_data,
Register wasm_instance) {
Register closure = function_data;
__ LoadAnyTaggedField(
__ LoadTaggedField(
function_data,
MemOperand(
closure,
wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()));
__ LoadAnyTaggedField(
__ LoadTaggedField(
function_data,
FieldMemOperand(function_data,
SharedFunctionInfo::kFunctionDataOffset));
FieldMemOperand(function_data, SharedFunctionInfo::kFunctionDataOffset));
__ LoadAnyTaggedField(
__ LoadTaggedField(
wasm_instance,
FieldMemOperand(function_data,
WasmExportedFunctionData::kInstanceOffset));
@ -3573,7 +3565,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
// A result of AllocateSuspender is in the return register.
__ Str(suspender, MemOperand(fp, kSuspenderOffset));
DEFINE_SCOPED(target_continuation);
__ LoadAnyTaggedField(
__ LoadTaggedField(
target_continuation,
FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
FREE_REG(suspender);
@ -4229,7 +4221,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
__ Mov(scratch, 1);
__ Str(scratch, MemOperand(thread_in_wasm_flag_addr, 0));
__ LoadAnyTaggedField(
__ LoadTaggedField(
function_entry,
FieldMemOperand(function_data,
WasmExportedFunctionData::kInternalOffset));
@ -4317,7 +4309,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
// expected to be on the top of the stack).
// We cannot use just the ret instruction for this, because we cannot pass
// the number of slots to remove in a Register as an argument.
__ DropArguments(param_count, TurboAssembler::kCountExcludesReceiver);
__ DropArguments(param_count, MacroAssembler::kCountExcludesReceiver);
__ Ret(lr);
// -------------------------------------------
@ -4497,7 +4489,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
regs.ResetExcept(promise, suspender, continuation);
DEFINE_REG(suspender_continuation);
__ LoadAnyTaggedField(
__ LoadTaggedField(
suspender_continuation,
FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
if (v8_flags.debug_code) {
@ -4518,18 +4510,19 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
// Update roots.
// -------------------------------------------
DEFINE_REG(caller);
__ LoadAnyTaggedField(caller,
FieldMemOperand(suspender_continuation,
WasmContinuationObject::kParentOffset));
__ LoadTaggedField(caller,
FieldMemOperand(suspender_continuation,
WasmContinuationObject::kParentOffset));
int32_t active_continuation_offset =
TurboAssembler::RootRegisterOffsetForRootIndex(
RootIndex::kActiveContinuation);
MacroAssembler::RootRegisterOffsetForRootIndex(
RootIndex::kActiveContinuation);
__ Str(caller, MemOperand(kRootRegister, active_continuation_offset));
DEFINE_REG(parent);
__ LoadAnyTaggedField(
__ LoadTaggedField(
parent, FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
int32_t active_suspender_offset =
TurboAssembler::RootRegisterOffsetForRootIndex(RootIndex::kActiveSuspender);
MacroAssembler::RootRegisterOffsetForRootIndex(
RootIndex::kActiveSuspender);
__ Str(parent, MemOperand(kRootRegister, active_suspender_offset));
regs.ResetExcept(promise, caller);
@ -4596,7 +4589,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
// Load suspender from closure.
// -------------------------------------------
DEFINE_REG(sfi);
__ LoadAnyTaggedField(
__ LoadTaggedField(
sfi,
MemOperand(
closure,
@ -4606,12 +4599,12 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
// RecordWriteField calls later.
DEFINE_PINNED(suspender, WriteBarrierDescriptor::ObjectRegister());
DEFINE_REG(function_data);
__ LoadAnyTaggedField(
__ LoadTaggedField(
function_data,
FieldMemOperand(sfi, SharedFunctionInfo::kFunctionDataOffset));
// The write barrier uses a fixed register for the host object (rdi). The next
// barrier is on the suspender, so load it in rdi directly.
__ LoadAnyTaggedField(
__ LoadTaggedField(
suspender,
FieldMemOperand(function_data, WasmResumeData::kSuspenderOffset));
// Check the suspender state.
@ -4660,8 +4653,8 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
scratch,
FieldMemOperand(suspender, WasmSuspenderObject::kStateOffset));
int32_t active_suspender_offset =
TurboAssembler::RootRegisterOffsetForRootIndex(
RootIndex::kActiveSuspender);
MacroAssembler::RootRegisterOffsetForRootIndex(
RootIndex::kActiveSuspender);
__ Str(suspender, MemOperand(kRootRegister, active_suspender_offset));
// Next line we are going to load a field from suspender, but we have to use
@ -4670,10 +4663,9 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
FREE_REG(suspender);
DEFINE_PINNED(target_continuation, WriteBarrierDescriptor::ObjectRegister());
suspender = target_continuation;
__ LoadAnyTaggedField(
__ LoadTaggedField(
target_continuation,
FieldMemOperand(suspender,
WasmSuspenderObject::kContinuationOffset));
FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
suspender = no_reg;
__ StoreTaggedField(
@ -4685,8 +4677,8 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
active_continuation, kLRHasBeenSaved, SaveFPRegsMode::kIgnore);
FREE_REG(active_continuation);
int32_t active_continuation_offset =
TurboAssembler::RootRegisterOffsetForRootIndex(
RootIndex::kActiveContinuation);
MacroAssembler::RootRegisterOffsetForRootIndex(
RootIndex::kActiveContinuation);
__ Str(target_continuation,
MemOperand(kRootRegister, active_continuation_offset));
@ -4731,7 +4723,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
__ bind(&suspend);
__ LeaveFrame(StackFrame::STACK_SWITCH);
// Pop receiver + parameter.
__ DropArguments(2, TurboAssembler::kCountIncludesReceiver);
__ DropArguments(2, MacroAssembler::kCountIncludesReceiver);
__ Ret(lr);
}
} // namespace
@ -5320,12 +5312,12 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
DCHECK(!AreAliased(receiver, holder, callback, data, undef, isolate_address,
name));
__ LoadAnyTaggedField(data,
FieldMemOperand(callback, AccessorInfo::kDataOffset));
__ LoadTaggedField(data,
FieldMemOperand(callback, AccessorInfo::kDataOffset));
__ LoadRoot(undef, RootIndex::kUndefinedValue);
__ Mov(isolate_address, ExternalReference::isolate_address(masm->isolate()));
__ LoadTaggedPointerField(
name, FieldMemOperand(callback, AccessorInfo::kNameOffset));
__ LoadTaggedField(name,
FieldMemOperand(callback, AccessorInfo::kNameOffset));
// PropertyCallbackArguments:
// receiver, data, return value, return value default, isolate, holder,
@ -5384,9 +5376,9 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
// DirectCEntry places the return address on the stack (updated by the GC),
// making the call GC safe. The irregexp backend relies on this.
__ Poke<TurboAssembler::kSignLR>(lr, 0); // Store the return address.
__ Poke<MacroAssembler::kSignLR>(lr, 0); // Store the return address.
__ Blr(x10); // Call the C++ function.
__ Peek<TurboAssembler::kAuthLR>(lr, 0); // Return to calling code.
__ Peek<MacroAssembler::kAuthLR>(lr, 0); // Return to calling code.
__ AssertFPCRState();
__ Ret();
}
@ -5696,10 +5688,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Get the InstructionStream object from the shared function info.
Register code_obj = x22;
__ LoadTaggedPointerField(
__ LoadTaggedField(
code_obj,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
__ LoadTaggedField(
code_obj,
FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
@ -5731,11 +5723,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Load the feedback vector.
Register feedback_vector = x2;
__ LoadTaggedPointerField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
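
The feedback-vector plumbing rewritten throughout this arm64 file is one load chain: closure to FeedbackCell, then its value slot, then an instance-type check, because the cell holds something else (for example undefined) until a vector is allocated. In stand-in C++ (types and the type constant are illustrative):

// Sketch of the chain the hunks above emit with LoadTaggedField.
struct HeapObject { int instance_type; };
struct Cell : HeapObject { HeapObject* value; };
struct JSFunction : HeapObject { Cell* feedback_cell; };
constexpr int FEEDBACK_VECTOR_TYPE = 0xF1;  // placeholder, not V8's value

inline HeapObject* TryGetFeedbackVector(JSFunction* closure) {
  HeapObject* maybe_vector = closure->feedback_cell->value;
  if (maybe_vector->instance_type != FEEDBACK_VECTOR_TYPE) return nullptr;
  return maybe_vector;
}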

@ -125,8 +125,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
__ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi,
TurboAssembler::kCountIncludesReceiver);
__ DropArguments(edx, ecx, MacroAssembler::kCountIsSmi,
MacroAssembler::kCountIncludesReceiver);
__ ret(0);
__ bind(&stack_overflow);
@ -280,8 +280,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
__ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi,
TurboAssembler::kCountIncludesReceiver);
__ DropArguments(edx, ecx, MacroAssembler::kCountIsSmi,
MacroAssembler::kCountIncludesReceiver);
__ ret(0);
// Otherwise we do a smi check and fall through to check if the return value
@ -768,8 +768,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ leave();
// Drop receiver + arguments.
__ DropArguments(params_size, scratch2, TurboAssembler::kCountIsBytes,
TurboAssembler::kCountIncludesReceiver);
__ DropArguments(params_size, scratch2, MacroAssembler::kCountIsBytes,
MacroAssembler::kCountIncludesReceiver);
}
// Advance the current bytecode offset. This simulates what all bytecode
@ -1810,8 +1810,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
}
__ bind(&no_this_arg);
__ DropArgumentsAndPushNewReceiver(eax, edi, ecx,
TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
// Restore receiver to edi.
__ movd(edi, xmm0);
@ -1919,8 +1919,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ movd(xmm0, edx);
__ DropArgumentsAndPushNewReceiver(eax, ecx, edx,
TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
// Restore argumentsList.
__ movd(edx, xmm0);
@ -1978,8 +1978,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ DropArgumentsAndPushNewReceiver(
eax, masm->RootAsOperand(RootIndex::kUndefinedValue), ecx,
TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
// Restore argumentsList.
__ movd(ecx, xmm0);

@ -112,8 +112,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
__ DropArguments(t3, TurboAssembler::kCountIsSmi,
TurboAssembler::kCountIncludesReceiver, t3);
__ DropArguments(t3, MacroAssembler::kCountIsSmi,
MacroAssembler::kCountIncludesReceiver, t3);
__ Ret();
}
@ -267,8 +267,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
__ DropArguments(a1, TurboAssembler::kCountIsSmi,
TurboAssembler::kCountIncludesReceiver, a4);
__ DropArguments(a1, MacroAssembler::kCountIsSmi,
MacroAssembler::kCountIncludesReceiver, a4);
__ Ret();
__ bind(&check_receiver);
@ -803,8 +803,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
__ DropArguments(params_size, TurboAssembler::kCountIsBytes,
TurboAssembler::kCountIncludesReceiver);
__ DropArguments(params_size, MacroAssembler::kCountIsBytes,
MacroAssembler::kCountIncludesReceiver);
}
// Advance the current bytecode offset. This simulates what all bytecode
@ -1328,7 +1328,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
// Push the arguments.
__ PushArray(start_address, num_args, scratch, scratch2,
TurboAssembler::PushArrayOrder::kReverse);
MacroAssembler::PushArrayOrder::kReverse);
}
// static
@ -1794,8 +1794,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Movz(arg_array, undefined_value, scratch); // if argc == 1
__ Ld_d(receiver, MemOperand(sp, 0));
__ DropArgumentsAndPushNewReceiver(argc, this_arg,
TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@ -1889,8 +1889,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ Movz(arguments_list, undefined_value, scratch); // if argc == 2
__ DropArgumentsAndPushNewReceiver(argc, this_argument,
TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@ -1949,8 +1949,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ Movz(new_target, target, scratch); // if argc == 2
__ DropArgumentsAndPushNewReceiver(argc, undefined_value,
TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------

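The GenerateInterpreterPushArgs hunk above keeps PushArrayOrder::kReverse while re-homing the enum on MacroAssembler. A small model of what the order flag plausibly means for argument pushing; the direction chosen for each mode is an assumed reading, not taken from this patch.

#include <cstddef>
#include <cstdint>
#include <vector>

// Assumed reading of the flag: kReverse walks the source array from its last
// element to its first, so element 0 ends up nearest the stack pointer.
enum class PushArrayOrder { kNormal, kReverse };

void PushArray(std::vector<std::intptr_t>& stack, const std::intptr_t* array,
               std::size_t size, PushArrayOrder order) {
  if (order == PushArrayOrder::kReverse) {
    for (std::size_t i = size; i-- > 0;) stack.push_back(array[i]);
  } else {
    for (std::size_t i = 0; i < size; ++i) stack.push_back(array[i]);
  }
}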

@ -112,8 +112,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
__ DropArguments(t3, TurboAssembler::kCountIsSmi,
TurboAssembler::kCountIncludesReceiver, t3);
__ DropArguments(t3, MacroAssembler::kCountIsSmi,
MacroAssembler::kCountIncludesReceiver, t3);
__ Ret();
}
@ -267,8 +267,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
__ DropArguments(a1, TurboAssembler::kCountIsSmi,
TurboAssembler::kCountIncludesReceiver, a4);
__ DropArguments(a1, MacroAssembler::kCountIsSmi,
MacroAssembler::kCountIncludesReceiver, a4);
__ Ret();
__ bind(&check_receiver);
@ -804,8 +804,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
__ DropArguments(params_size, TurboAssembler::kCountIsBytes,
TurboAssembler::kCountIncludesReceiver);
__ DropArguments(params_size, MacroAssembler::kCountIsBytes,
MacroAssembler::kCountIncludesReceiver);
}
// Advance the current bytecode offset. This simulates what all bytecode
@ -1320,7 +1320,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
// Push the arguments.
__ PushArray(start_address, num_args, scratch, scratch2,
TurboAssembler::PushArrayOrder::kReverse);
MacroAssembler::PushArrayOrder::kReverse);
}
// static
@ -1784,8 +1784,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Movz(arg_array, undefined_value, scratch); // if argc == 1
__ Ld(receiver, MemOperand(sp));
__ DropArgumentsAndPushNewReceiver(argc, this_arg,
TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@ -1881,8 +1881,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ Movz(arguments_list, undefined_value, scratch); // if argc == 2
__ DropArgumentsAndPushNewReceiver(argc, this_argument,
TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@ -1941,8 +1941,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ Movz(new_target, target, scratch); // if argc == 2
__ DropArgumentsAndPushNewReceiver(argc, undefined_value,
TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------

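Hunks in this and the neighboring files repeat a two-step load, closure to FeedbackCell to cell value, followed by a map check against FEEDBACK_VECTOR_TYPE. A simplified model of that chain; the struct shapes and the constant are placeholders, not V8's real object layout.

// Simplified model of the feedback-vector lookup done by the builtins above.
struct HeapObject { int instance_type; };
struct FeedbackCell { HeapObject* value; };           // Cell::kValueOffset
struct JSFunction { FeedbackCell* feedback_cell; };   // kFeedbackCellOffset

constexpr int FEEDBACK_VECTOR_TYPE = 1;  // placeholder value

// Returns the vector, or nullptr when it has not been allocated yet (the
// assembly branches to the install/allocate paths in that case).
HeapObject* LoadFeedbackVector(const JSFunction* closure) {
  HeapObject* maybe_vector = closure->feedback_cell->value;
  if (maybe_vector->instance_type != FEEDBACK_VECTOR_TYPE) return nullptr;
  return maybe_vector;
}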

@ -64,7 +64,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
}
__ CmpS32(scratch1, Operand(INTERPRETER_DATA_TYPE), r0);
__ bne(&done);
__ LoadTaggedPointerField(
__ LoadTaggedField(
sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset), r0);
@ -120,10 +120,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Get the InstructionStream object from the shared function info.
Register code_obj = r9;
__ LoadTaggedPointerField(
__ LoadTaggedField(
code_obj, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset),
r0);
__ LoadTaggedPointerField(
__ LoadTaggedField(
code_obj,
FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset), r0);
@ -155,12 +155,11 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Load the feedback vector.
Register feedback_vector = r5;
__ LoadTaggedPointerField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
r0);
__ LoadTaggedField(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
r0);
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset), r0);
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
@ -361,8 +360,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
__ DropArguments(scratch, TurboAssembler::kCountIsSmi,
TurboAssembler::kCountIncludesReceiver);
__ DropArguments(scratch, MacroAssembler::kCountIsSmi,
MacroAssembler::kCountIncludesReceiver);
__ blr();
__ bind(&stack_overflow);
@ -431,7 +430,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadTaggedPointerField(
__ LoadTaggedField(
r4,
FieldMemOperand(
r3, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset),
@ -495,7 +494,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[4*kSystemPointerSize]: context
// -----------------------------------
__ LoadTaggedPointerField(
__ LoadTaggedField(
r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(r7);
@ -611,8 +610,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
__ DropArguments(r4, TurboAssembler::kCountIsSmi,
TurboAssembler::kCountIncludesReceiver);
__ DropArguments(r4, MacroAssembler::kCountIsSmi,
MacroAssembler::kCountIncludesReceiver);
__ blr();
__ bind(&check_receiver);
@ -660,10 +659,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ AssertGeneratorObject(r4);
// Load suspended function and context.
__ LoadTaggedPointerField(
__ LoadTaggedField(
r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
__ LoadTaggedPointerField(cp, FieldMemOperand(r7, JSFunction::kContextOffset),
r0);
__ LoadTaggedField(cp, FieldMemOperand(r7, JSFunction::kContextOffset), r0);
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@ -703,12 +701,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -----------------------------------
// Copy the function arguments from the generator object's register file.
__ LoadTaggedPointerField(
__ LoadTaggedField(
r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
__ LoadU16(
r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
__ subi(r6, r6, Operand(kJSArgcReceiverSlots));
__ LoadTaggedPointerField(
__ LoadTaggedField(
r5, FieldMemOperand(r4, JSGeneratorObject::kParametersAndRegistersOffset),
r0);
{
@ -719,14 +717,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ blt(&done_loop);
__ ShiftLeftU64(r10, r6, Operand(kTaggedSizeLog2));
__ add(scratch, r5, r10);
__ LoadAnyTaggedField(
scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize), r0);
__ LoadTaggedField(scratch,
FieldMemOperand(scratch, FixedArray::kHeaderSize), r0);
__ Push(scratch);
__ b(&loop);
__ bind(&done_loop);
// Push receiver.
__ LoadAnyTaggedField(
__ LoadTaggedField(
scratch, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset), r0);
__ Push(scratch);
}
@ -734,9 +732,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (v8_flags.debug_code) {
Label is_baseline;
__ LoadTaggedPointerField(
__ LoadTaggedField(
r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
__ LoadTaggedPointerField(
__ LoadTaggedField(
r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset), r0);
GetSharedFunctionInfoBytecodeOrBaseline(masm, r6, ip, &is_baseline);
__ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
@ -746,7 +744,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
__ LoadTaggedPointerField(
__ LoadTaggedField(
r3, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
__ LoadU16(r3, FieldMemOperand(
r3, SharedFunctionInfo::kFormalParameterCountOffset));
@ -756,8 +754,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mr(r6, r4);
__ mr(r4, r7);
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ LoadTaggedPointerField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset),
r0);
__ LoadTaggedField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset), r0);
__ JumpCodeObject(r5);
}
@ -769,7 +766,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(r4);
__ LoadTaggedPointerField(
__ LoadTaggedField(
r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
}
__ b(&stepping_prepared);
@ -780,7 +777,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(r4);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(r4);
__ LoadTaggedPointerField(
__ LoadTaggedField(
r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
}
__ b(&stepping_prepared);
@ -1119,8 +1116,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
__ DropArguments(params_size, TurboAssembler::kCountIsBytes,
TurboAssembler::kCountIncludesReceiver);
__ DropArguments(params_size, MacroAssembler::kCountIsBytes,
MacroAssembler::kCountIncludesReceiver);
}
// Advance the current bytecode offset. This simulates what all bytecode
@ -1212,12 +1209,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
BaselineOutOfLinePrologueDescriptor::kClosure);
// Load the feedback vector from the closure.
Register feedback_vector = ip;
__ LoadTaggedPointerField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
r0);
__ LoadTaggedField(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
r0);
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset), r0);
__ AssertFeedbackVector(feedback_vector, r11);
// Check for a tiering state.
@ -1378,10 +1374,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ LoadTaggedPointerField(
__ LoadTaggedField(
r7, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset), r0);
// Load original bytecode array or the debug copy.
__ LoadTaggedPointerField(
__ LoadTaggedField(
kInterpreterBytecodeArrayRegister,
FieldMemOperand(r7, SharedFunctionInfo::kFunctionDataOffset), r0);
@ -1397,17 +1393,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ bne(&compile_lazy);
// Load the feedback vector from the closure.
__ LoadTaggedPointerField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
r0);
__ LoadTaggedField(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
r0);
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset), r0);
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, set up the stack frame.
__ LoadTaggedPointerField(
__ LoadTaggedField(
r7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset), r0);
__ LoadU16(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
__ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE));
@ -1589,17 +1584,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ bind(&is_baseline);
{
// Load the feedback vector from the closure.
__ LoadTaggedPointerField(
__ LoadTaggedField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
r0);
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset),
r0);
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
__ LoadTaggedPointerField(
__ LoadTaggedField(
ip, FieldMemOperand(feedback_vector, HeapObject::kMapOffset), r0);
__ LoadU16(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
__ CmpS32(ip, Operand(FEEDBACK_VECTOR_TYPE), r0);
@ -1636,7 +1631,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
__ sub(start_address, start_address, scratch);
// Push the arguments.
__ PushArray(start_address, num_args, scratch, r0,
TurboAssembler::PushArrayOrder::kReverse);
MacroAssembler::PushArrayOrder::kReverse);
}
// static
@ -1773,16 +1768,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
__ LoadU64(r5, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ LoadTaggedPointerField(
__ LoadTaggedField(
r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset), r0);
__ LoadTaggedPointerField(
__ LoadTaggedField(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset), r0);
__ CompareObjectType(r5, kInterpreterDispatchTableRegister,
kInterpreterDispatchTableRegister,
INTERPRETER_DATA_TYPE);
__ bne(&builtin_trampoline);
__ LoadTaggedPointerField(
__ LoadTaggedField(
r5, FieldMemOperand(r5, InterpreterData::kInterpreterTrampolineOffset),
r0);
__ LoadCodeEntry(r5, r5);
@ -2027,8 +2022,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ bind(&done);
__ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
__ DropArgumentsAndPushNewReceiver(r3, r8, MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@ -2111,8 +2106,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
__ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
__ DropArgumentsAndPushNewReceiver(r3, r8, MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@ -2160,8 +2155,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ blt(&done);
__ LoadU64(r6, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
__ DropArgumentsAndPushNewReceiver(r3, r7, TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
__ DropArgumentsAndPushNewReceiver(r3, r7, MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@ -2240,8 +2235,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Allow r5 to be a FixedArray, or a FixedDoubleArray if r7 == 0.
Label ok, fail;
__ AssertNotSmi(r5);
__ LoadTaggedPointerField(scratch,
FieldMemOperand(r5, HeapObject::kMapOffset), r0);
__ LoadTaggedField(scratch, FieldMemOperand(r5, HeapObject::kMapOffset),
r0);
__ LoadU16(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ cmpi(scratch, Operand(FIXED_ARRAY_TYPE));
__ beq(&ok);
@ -2276,7 +2271,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
__ mtctr(r7);
__ bind(&loop);
__ LoadTaggedPointerField(scratch, MemOperand(r5, kTaggedSize), r0);
__ LoadTaggedField(scratch, MemOperand(r5, kTaggedSize), r0);
__ addi(r5, r5, Operand(kTaggedSize));
__ CompareRoot(scratch, RootIndex::kTheHoleValue);
__ bne(&skip);
@ -2311,8 +2306,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(r6, &new_target_not_constructor);
__ LoadTaggedPointerField(scratch,
FieldMemOperand(r6, HeapObject::kMapOffset), r0);
__ LoadTaggedField(scratch, FieldMemOperand(r6, HeapObject::kMapOffset),
r0);
__ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ TestBit(scratch, Map::Bits1::IsConstructorBit::kShift, r0);
__ bne(&new_target_constructor, cr0);
@ -2395,14 +2390,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -----------------------------------
__ AssertCallableFunction(r4);
__ LoadTaggedPointerField(
__ LoadTaggedField(
r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
__ LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset),
r0);
__ LoadTaggedField(cp, FieldMemOperand(r4, JSFunction::kContextOffset), r0);
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kFlagsOffset));
@ -2456,7 +2450,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(r3, r4);
__ SmiUntag(r3);
}
__ LoadTaggedPointerField(
__ LoadTaggedField(
r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
__ bind(&convert_receiver);
}
@ -2487,7 +2481,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Load [[BoundArguments]] into r5 and length of that into r7.
Label no_bound_arguments;
__ LoadTaggedPointerField(
__ LoadTaggedField(
r5, FieldMemOperand(r4, JSBoundFunction::kBoundArgumentsOffset), r0);
__ SmiUntag(r7, FieldMemOperand(r5, FixedArray::kLengthOffset), SetRC, r0);
__ beq(&no_bound_arguments, cr0);
@ -2536,7 +2530,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ subi(r7, r7, Operand(1));
__ ShiftLeftU64(scratch, r7, Operand(kTaggedSizeLog2));
__ add(scratch, scratch, r5);
__ LoadAnyTaggedField(scratch, MemOperand(scratch), r0);
__ LoadTaggedField(scratch, MemOperand(scratch), r0);
__ Push(scratch);
__ bdnz(&loop);
__ bind(&done);
@ -2559,15 +2553,15 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(r4);
// Patch the receiver to [[BoundThis]].
__ LoadAnyTaggedField(
r6, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset), r0);
__ LoadTaggedField(r6, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset),
r0);
__ StoreReceiver(r6, r3, ip);
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
__ LoadTaggedPointerField(
__ LoadTaggedField(
r4, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
@ -2667,7 +2661,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
Label call_generic_stub;
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ LoadTaggedPointerField(
__ LoadTaggedField(
r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
__ mov(ip, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
@ -2699,12 +2693,12 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
Label skip;
__ CompareTagged(r4, r6);
__ bne(&skip);
__ LoadTaggedPointerField(
__ LoadTaggedField(
r6, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
__ bind(&skip);
// Construct the [[BoundTargetFunction]] via the Construct builtin.
__ LoadTaggedPointerField(
__ LoadTaggedField(
r4, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
@ -2728,8 +2722,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ JumpIfSmi(target, &non_constructor);
// Check if target has a [[Construct]] internal method.
__ LoadTaggedPointerField(
map, FieldMemOperand(target, HeapObject::kMapOffset), r0);
__ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset), r0);
{
Register flags = r5;
DCHECK(!AreAliased(argc, target, map, instance_type, flags));
@ -2817,15 +2810,15 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
Register scratch = ip;
Label allocate_vector, done;
__ LoadTaggedPointerField(
__ LoadTaggedField(
vector,
FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kFeedbackVectorsOffset),
scratch);
__ ShiftLeftU64(scratch, func_index, Operand(kTaggedSizeLog2));
__ AddS64(vector, vector, scratch);
__ LoadTaggedPointerField(
vector, FieldMemOperand(vector, FixedArray::kHeaderSize), scratch);
__ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize),
scratch);
__ JumpIfSmi(vector, &allocate_vector);
__ bind(&done);
__ push(kWasmInstanceRegister);
@ -3530,16 +3523,16 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ push(receiver);
// Push data from AccessorInfo.
__ LoadAnyTaggedField(
scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset), r0);
__ LoadTaggedField(scratch,
FieldMemOperand(callback, AccessorInfo::kDataOffset), r0);
__ push(scratch);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ Push(scratch, scratch);
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
__ Push(scratch, holder);
__ Push(Smi::zero()); // should_throw_on_error -> false
__ LoadTaggedPointerField(
scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset), r0);
__ LoadTaggedField(scratch,
FieldMemOperand(callback, AccessorInfo::kNameOffset), r0);
__ push(scratch);
// v8::PropertyCallbackInfo::args_ array and name handle.

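The hunks above fold LoadTaggedPointerField and LoadAnyTaggedField into a single LoadTaggedField. Under pointer compression the old pair could use different decompression paths for pointer-only fields versus fields that may also hold a Smi; a unified decompression makes one helper enough for both kinds of call site. A rough model, in which the cage base, the 32-bit slot type, and the collapse to a single addition are all assumptions:

#include <cstdint>

using Tagged_t = uint32_t;  // compressed 32-bit on-heap slot (assumed)
using Address = uint64_t;   // full machine word

constexpr Address kCageBase = 0x100000000ull;  // hypothetical base

// Old split (sketch): separate helpers for pointer-only and any-tagged slots.
Address DecompressTaggedPointer(Tagged_t v) { return kCageBase + v; }
Address DecompressAnyTagged(Tagged_t v) {
  const bool is_smi = (v & 1) == 0;
  return is_smi ? static_cast<Address>(v) : kCageBase + v;
}

// Unified form (sketch): one decompression for every tagged value, which is
// what lets a single LoadTaggedField replace both call sites above.
Address DecompressTagged(Tagged_t v) { return kCageBase + v; }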

@ -155,7 +155,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
{
UseScratchRegisterScope temps(masm);
Register func_info = temps.Acquire();
__ LoadTaggedPointerField(
__ LoadTaggedField(
func_info, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Load32U(func_info,
FieldMemOperand(func_info, SharedFunctionInfo::kFlagsOffset));
@ -353,7 +353,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
__ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE),
Label::Distance::kNear);
__ LoadTaggedPointerField(
__ LoadTaggedField(
sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
@ -377,10 +377,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ AssertGeneratorObject(a1);
// Load suspended function and context.
__ LoadTaggedPointerField(
a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedPointerField(cp,
FieldMemOperand(a4, JSFunction::kContextOffset));
__ LoadTaggedField(a4,
FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedField(cp, FieldMemOperand(a4, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@ -417,12 +416,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// context allocation for any variables in generators, the actual argument
// values have already been copied into the context and these dummy values
// will never be used.
__ LoadTaggedPointerField(
__ LoadTaggedField(
a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ Lhu(a3,
FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
__ SubWord(a3, a3, Operand(kJSArgcReceiverSlots));
__ LoadTaggedPointerField(
__ LoadTaggedField(
t1,
FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
{
@ -431,23 +430,23 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ SubWord(a3, a3, Operand(1));
__ Branch(&done_loop, lt, a3, Operand(zero_reg), Label::Distance::kNear);
__ CalcScaledAddress(kScratchReg, t1, a3, kTaggedSizeLog2);
__ LoadAnyTaggedField(
kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
__ LoadTaggedField(kScratchReg,
FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
__ Push(kScratchReg);
__ Branch(&loop);
__ bind(&done_loop);
// Push receiver.
__ LoadAnyTaggedField(
kScratchReg, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
__ LoadTaggedField(kScratchReg,
FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
__ Push(kScratchReg);
}
// Underlying function needs to have bytecode available.
if (v8_flags.debug_code) {
Label is_baseline;
__ LoadTaggedPointerField(
__ LoadTaggedField(
a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
__ LoadTaggedField(
a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, a0, &is_baseline);
__ GetObjectType(a3, a3, a3);
@ -458,7 +457,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
__ LoadTaggedPointerField(
__ LoadTaggedField(
a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ Lhu(a0, FieldMemOperand(
a0, SharedFunctionInfo::kFormalParameterCountOffset));
@ -468,7 +467,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Move(a3, a1);
__ Move(a1, a4);
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
__ LoadTaggedPointerField(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
__ LoadTaggedField(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
__ JumpCodeObject(a2);
}
@ -481,8 +480,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(a1);
}
__ LoadTaggedPointerField(
a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedField(a4,
FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
__ Branch(&stepping_prepared);
__ bind(&prepare_step_in_suspended_generator);
@ -492,8 +491,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(a1);
}
__ LoadTaggedPointerField(
a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedField(a4,
FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
__ Branch(&stepping_prepared);
__ bind(&stack_overflow);
@ -1130,10 +1129,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(
Register feedback_vector = a2;
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ LoadTaggedPointerField(
__ LoadTaggedField(
kScratchReg,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
__ LoadTaggedField(
kInterpreterBytecodeArrayRegister,
FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
Label is_baseline;
@ -1147,17 +1146,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));
// Load the feedback vector from the closure.
__ LoadTaggedPointerField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, set up the stack frame.
__ LoadTaggedPointerField(
a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ LoadTaggedField(a4,
FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
__ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE),
Label::Distance::kNear);
@ -1331,16 +1329,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ bind(&is_baseline);
{
// Load the feedback vector from the closure.
__ LoadTaggedPointerField(
__ LoadTaggedField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
__ LoadTaggedPointerField(
__ LoadTaggedField(
t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ Lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
__ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
@ -1381,7 +1379,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
// Push the arguments.
__ PushArray(start_address, num_args,
TurboAssembler::PushArrayOrder::kReverse);
MacroAssembler::PushArrayOrder::kReverse);
}
// static
@ -1511,16 +1509,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
__ LoadWord(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ LoadTaggedPointerField(
__ LoadTaggedField(
t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
__ LoadTaggedField(
t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
__ GetObjectType(t0, kInterpreterDispatchTableRegister,
kInterpreterDispatchTableRegister);
__ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
Operand(INTERPRETER_DATA_TYPE), Label::Distance::kNear);
__ LoadTaggedPointerField(
__ LoadTaggedField(
t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
__ LoadCodeEntry(t0, t0);
__ BranchShort(&trampoline_loaded);
@ -1778,7 +1776,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadTaggedPointerField(
__ LoadTaggedField(
a1,
MemOperand(a0,
InstructionStream::kDeoptimizationDataOrInterpreterDataOffset -
@ -2152,7 +2150,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ SubWord(scratch, sp, Operand(scratch));
__ LoadRoot(hole_value, RootIndex::kTheHoleValue);
__ bind(&loop);
__ LoadTaggedPointerField(a5, MemOperand(src));
__ LoadTaggedField(a5, MemOperand(src));
__ AddWord(src, src, kTaggedSize);
__ Branch(&push, ne, a5, Operand(hole_value), Label::Distance::kNear);
__ LoadRoot(a5, RootIndex::kUndefinedValue);
@ -2190,8 +2188,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ JumpIfSmi(a3, &new_target_not_constructor);
__ LoadTaggedPointerField(scratch,
FieldMemOperand(a3, HeapObject::kMapOffset));
__ LoadTaggedField(scratch, FieldMemOperand(a3, HeapObject::kMapOffset));
__ Lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
__ Branch(&new_target_constructor, ne, scratch, Operand(zero_reg),
@ -2271,7 +2268,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ AssertCallableFunction(a1);
Label class_constructor;
__ LoadTaggedPointerField(
__ LoadTaggedField(
a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Load32U(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
__ And(kScratchReg, a3,
@ -2281,8 +2278,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
__ LoadTaggedPointerField(cp,
FieldMemOperand(a1, JSFunction::kContextOffset));
__ LoadTaggedField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ Load32U(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
@ -2337,7 +2333,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(a0, a1);
__ SmiUntag(a0);
}
__ LoadTaggedPointerField(
__ LoadTaggedField(
a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
@ -2379,7 +2375,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
Register bound_argv = a2;
// Load [[BoundArguments]] into a2 and length of that into a4.
Label no_bound_arguments;
__ LoadTaggedPointerField(
__ LoadTaggedField(
bound_argv, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
__ SmiUntagField(bound_argc,
FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
@ -2423,7 +2419,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ SubWord(a4, a4, Operand(1));
__ Branch(&done_loop, lt, a4, Operand(zero_reg), Label::Distance::kNear);
__ CalcScaledAddress(a5, a2, a4, kTaggedSizeLog2);
__ LoadAnyTaggedField(kScratchReg, MemOperand(a5));
__ LoadTaggedField(kScratchReg, MemOperand(a5));
__ Push(kScratchReg);
__ Branch(&loop);
__ bind(&done_loop);
@ -2449,8 +2445,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ LoadAnyTaggedField(
scratch, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
__ LoadTaggedField(scratch,
FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
__ StoreReceiver(scratch, a0, kScratchReg);
}
@ -2458,7 +2454,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
__ LoadTaggedPointerField(
__ LoadTaggedField(
a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
@ -2548,7 +2544,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
Label call_generic_stub;
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ LoadTaggedPointerField(
__ LoadTaggedField(
a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Load32U(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
__ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
@ -2587,12 +2583,12 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ Branch(&skip, ne, a1, Operand(a3), Label::Distance::kNear);
#endif
}
__ LoadTaggedPointerField(
__ LoadTaggedField(
a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
__ bind(&skip);
// Construct the [[BoundTargetFunction]] via the Construct builtin.
__ LoadTaggedPointerField(
__ LoadTaggedField(
a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
@ -2615,7 +2611,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
temps.Include(t0, t1);
Register map = temps.Acquire();
Register scratch = temps.Acquire();
__ LoadTaggedPointerField(map, FieldMemOperand(a1, HeapObject::kMapOffset));
__ LoadTaggedField(map, FieldMemOperand(a1, HeapObject::kMapOffset));
__ Lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
__ And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
__ Branch(&non_constructor, eq, scratch, Operand(zero_reg));
@ -3366,8 +3362,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ SubWord(sp, sp, (PCA::kArgsLength + 1) * kSystemPointerSize);
__ StoreWord(receiver,
MemOperand(sp, (PCA::kThisIndex + 1) * kSystemPointerSize));
__ LoadAnyTaggedField(scratch,
FieldMemOperand(callback, AccessorInfo::kDataOffset));
__ LoadTaggedField(scratch,
FieldMemOperand(callback, AccessorInfo::kDataOffset));
__ StoreWord(scratch,
MemOperand(sp, (PCA::kDataIndex + 1) * kSystemPointerSize));
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
@ -3385,8 +3381,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
DCHECK_EQ(0, Smi::zero().ptr());
__ StoreWord(zero_reg, MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) *
kSystemPointerSize));
__ LoadTaggedPointerField(
scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
__ LoadTaggedField(scratch,
FieldMemOperand(callback, AccessorInfo::kNameOffset));
__ StoreWord(scratch, MemOperand(sp, 0 * kSystemPointerSize));
// v8::PropertyCallbackInfo::args_ array and name handle.
@ -3677,10 +3673,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Get the InstructionStream object from the shared function info.
Register code_obj = s1;
__ LoadTaggedPointerField(
__ LoadTaggedField(
code_obj,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
__ LoadTaggedField(
code_obj,
FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
@ -3719,11 +3715,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2;
__ LoadTaggedPointerField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.

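The Generate_CallBoundFunctionImpl and Generate_PushBoundArguments hunks follow the usual bound-function dispatch: overwrite the receiver with [[BoundThis]], splice [[BoundArguments]] ahead of the supplied arguments, then tail-call [[BoundTargetFunction]]. A compact model with simplified stand-in types (assumptions, not V8's layout):

#include <vector>

struct Value { /* tagged value stand-in */ };
struct JSBoundFunction {
  Value bound_this;                    // kBoundThisOffset
  std::vector<Value> bound_arguments;  // kBoundArgumentsOffset
  const void* bound_target_function;   // kBoundTargetFunctionOffset
};

void CallBound(const JSBoundFunction& f, Value& receiver,
               std::vector<Value>& args) {
  receiver = f.bound_this;  // StoreReceiver in the assembly above
  args.insert(args.begin(), f.bound_arguments.begin(),
              f.bound_arguments.end());  // Generate_PushBoundArguments
  // ...then jump to the Call builtin with f.bound_target_function.
}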

@ -65,7 +65,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
}
__ CmpS32(scratch1, Operand(INTERPRETER_DATA_TYPE));
__ bne(&done);
__ LoadTaggedPointerField(
__ LoadTaggedField(
sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
@ -120,10 +120,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Get the InstructionStream object from the shared function info.
Register code_obj = r8;
__ LoadTaggedPointerField(
__ LoadTaggedField(
code_obj,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
__ LoadTaggedField(
code_obj,
FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
@ -155,11 +155,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Load the feedback vector.
Register feedback_vector = r4;
__ LoadTaggedPointerField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
@ -320,7 +319,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadTaggedPointerField(
__ LoadTaggedField(
r3,
FieldMemOperand(
r2, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));
@ -428,8 +427,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
__ DropArguments(scratch, TurboAssembler::kCountIsSmi,
TurboAssembler::kCountIncludesReceiver);
__ DropArguments(scratch, MacroAssembler::kCountIsSmi,
MacroAssembler::kCountIncludesReceiver);
__ Ret();
__ bind(&stack_overflow);
@ -472,7 +471,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[4*kSystemPointerSize]: context
// -----------------------------------
__ LoadTaggedPointerField(
__ LoadTaggedField(
r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadU32(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(r6);
@ -584,8 +583,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
__ DropArguments(r3, TurboAssembler::kCountIsSmi,
TurboAssembler::kCountIncludesReceiver);
__ DropArguments(r3, MacroAssembler::kCountIsSmi,
MacroAssembler::kCountIncludesReceiver);
__ Ret();
__ bind(&check_receiver);
@ -633,10 +632,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ AssertGeneratorObject(r3);
// Load suspended function and context.
__ LoadTaggedPointerField(
r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedPointerField(cp,
FieldMemOperand(r6, JSFunction::kContextOffset));
__ LoadTaggedField(r6,
FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedField(cp, FieldMemOperand(r6, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@ -677,12 +675,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -----------------------------------
// Copy the function arguments from the generator object's register file.
__ LoadTaggedPointerField(
__ LoadTaggedField(
r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
__ LoadU16(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
__ SubS64(r5, r5, Operand(kJSArgcReceiverSlots));
__ LoadTaggedPointerField(
__ LoadTaggedField(
r4,
FieldMemOperand(r3, JSGeneratorObject::kParametersAndRegistersOffset));
{
@ -692,24 +690,24 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ blt(&done_loop);
__ ShiftLeftU64(r1, r5, Operand(kTaggedSizeLog2));
__ la(scratch, MemOperand(r4, r1));
__ LoadAnyTaggedField(scratch,
FieldMemOperand(scratch, FixedArray::kHeaderSize));
__ LoadTaggedField(scratch,
FieldMemOperand(scratch, FixedArray::kHeaderSize));
__ Push(scratch);
__ b(&loop);
__ bind(&done_loop);
// Push receiver.
__ LoadAnyTaggedField(
scratch, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
__ LoadTaggedField(scratch,
FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
__ Push(scratch);
}
// Underlying function needs to have bytecode available.
if (v8_flags.debug_code) {
Label is_baseline;
__ LoadTaggedPointerField(
__ LoadTaggedField(
r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
__ LoadTaggedField(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecodeOrBaseline(masm, r5, ip, &is_baseline);
__ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
@ -719,7 +717,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
__ LoadTaggedPointerField(
__ LoadTaggedField(
r2, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
__ LoadS16(
r2,
@ -730,7 +728,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mov(r5, r3);
__ mov(r3, r6);
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
__ LoadTaggedPointerField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
__ LoadTaggedField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
__ JumpCodeObject(r4);
}
@ -742,8 +740,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(r3);
__ LoadTaggedPointerField(
r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedField(r6,
FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
}
__ b(&stepping_prepared);
@ -753,8 +751,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(r3);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(r3);
__ LoadTaggedPointerField(
r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedField(r6,
FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
}
__ b(&stepping_prepared);
@ -1148,8 +1146,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
__ DropArguments(params_size, TurboAssembler::kCountIsBytes,
TurboAssembler::kCountIncludesReceiver);
__ DropArguments(params_size, MacroAssembler::kCountIsBytes,
MacroAssembler::kCountIncludesReceiver);
}
// Advance the current bytecode offset. This simulates what all bytecode
@ -1245,11 +1243,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
BaselineOutOfLinePrologueDescriptor::kClosure);
// Load the feedback vector from the closure.
Register feedback_vector = ip;
__ LoadTaggedPointerField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ AssertFeedbackVector(feedback_vector, r1);
// Check for a tiering state.
@ -1406,10 +1403,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ LoadTaggedPointerField(
__ LoadTaggedField(
r6, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Load original bytecode array or the debug copy.
__ LoadTaggedPointerField(
__ LoadTaggedField(
kInterpreterBytecodeArrayRegister,
FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
@ -1425,17 +1422,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ bne(&compile_lazy);
// Load the feedback vector from the closure.
__ LoadTaggedPointerField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, set up the stack frame.
__ LoadTaggedPointerField(
r6, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ LoadTaggedField(r6,
FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ LoadU16(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
__ CmpS64(r6, Operand(FEEDBACK_VECTOR_TYPE));
__ bne(&push_stack_frame);
@ -1611,16 +1607,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ bind(&is_baseline);
{
// Load the feedback vector from the closure.
__ LoadTaggedPointerField(
__ LoadTaggedField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
__ LoadTaggedPointerField(
__ LoadTaggedField(
ip, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ LoadU16(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
__ CmpS32(ip, Operand(FEEDBACK_VECTOR_TYPE));
@ -1657,7 +1653,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
__ SubS64(start_address, start_address, scratch);
// Push the arguments.
__ PushArray(start_address, num_args, r1, scratch,
TurboAssembler::PushArrayOrder::kReverse);
MacroAssembler::PushArrayOrder::kReverse);
}
// static
@ -1792,16 +1788,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
__ LoadU64(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ LoadTaggedPointerField(
__ LoadTaggedField(
r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
__ LoadTaggedField(
r4, FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
__ CompareObjectType(r4, kInterpreterDispatchTableRegister,
kInterpreterDispatchTableRegister,
INTERPRETER_DATA_TYPE);
__ bne(&builtin_trampoline);
__ LoadTaggedPointerField(
__ LoadTaggedField(
r4, FieldMemOperand(r4, InterpreterData::kInterpreterTrampolineOffset));
__ LoadCodeEntry(r4, r4);
__ b(&trampoline_loaded);
@ -2022,8 +2018,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadU64(r4, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ bind(&done);
__ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
__ DropArgumentsAndPushNewReceiver(r2, r7, MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@ -2107,8 +2103,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadU64(r4, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
__ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
__ DropArgumentsAndPushNewReceiver(r2, r7, MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@ -2157,8 +2153,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ blt(&done);
__ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
__ DropArgumentsAndPushNewReceiver(r2, r6, TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
__ DropArgumentsAndPushNewReceiver(r2, r6, MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@ -2240,8 +2236,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Allow r4 to be a FixedArray, or a FixedDoubleArray if r6 == 0.
Label ok, fail;
__ AssertNotSmi(r4);
__ LoadTaggedPointerField(scratch,
FieldMemOperand(r4, HeapObject::kMapOffset));
__ LoadTaggedField(scratch, FieldMemOperand(r4, HeapObject::kMapOffset));
__ LoadS16(scratch,
FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ CmpS64(scratch, Operand(FIXED_ARRAY_TYPE));
@ -2277,7 +2272,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
__ mov(r1, r6);
__ bind(&loop);
__ LoadAnyTaggedField(scratch, MemOperand(r4, kTaggedSize), r0);
__ LoadTaggedField(scratch, MemOperand(r4, kTaggedSize), r0);
__ la(r4, MemOperand(r4, kTaggedSize));
__ CompareRoot(scratch, RootIndex::kTheHoleValue);
__ bne(&skip, Label::kNear);
@ -2312,8 +2307,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(r5, &new_target_not_constructor);
__ LoadTaggedPointerField(scratch,
FieldMemOperand(r5, HeapObject::kMapOffset));
__ LoadTaggedField(scratch, FieldMemOperand(r5, HeapObject::kMapOffset));
__ LoadU8(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ tmll(scratch, Operand(Map::Bits1::IsConstructorBit::kShift));
__ bne(&new_target_constructor);
@ -2397,14 +2391,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -----------------------------------
__ AssertCallableFunction(r3);
__ LoadTaggedPointerField(
__ LoadTaggedField(
r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
__ LoadTaggedPointerField(cp,
FieldMemOperand(r3, JSFunction::kContextOffset));
__ LoadTaggedField(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ LoadU32(r5, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
@ -2458,7 +2451,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(r2, r3);
__ SmiUntag(r2);
}
__ LoadTaggedPointerField(
__ LoadTaggedField(
r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
@ -2489,7 +2482,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Load [[BoundArguments]] into r4 and length of that into r6.
Label no_bound_arguments;
__ LoadTaggedPointerField(
__ LoadTaggedField(
r4, FieldMemOperand(r3, JSBoundFunction::kBoundArgumentsOffset));
__ SmiUntagField(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
__ LoadAndTestP(r6, r6);
@ -2535,7 +2528,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&loop);
__ SubS64(r1, r6, Operand(1));
__ ShiftLeftU64(r1, r1, Operand(kTaggedSizeLog2));
__ LoadAnyTaggedField(scratch, MemOperand(r4, r1), r0);
__ LoadTaggedField(scratch, MemOperand(r4, r1), r0);
__ Push(scratch);
__ SubS64(r6, r6, Operand(1));
__ bgt(&loop);
@ -2559,15 +2552,15 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(r3);
// Patch the receiver to [[BoundThis]].
__ LoadAnyTaggedField(r5,
FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
__ LoadTaggedField(r5,
FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
__ StoreReceiver(r5, r2, r1);
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
__ LoadTaggedPointerField(
__ LoadTaggedField(
r3, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
@ -2667,7 +2660,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
Label call_generic_stub;
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ LoadTaggedPointerField(
__ LoadTaggedField(
r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadU32(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
__ AndP(r6, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
@ -2698,12 +2691,12 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
Label skip;
__ CompareTagged(r3, r5);
__ bne(&skip);
__ LoadTaggedPointerField(
__ LoadTaggedField(
r5, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
__ bind(&skip);
// Construct the [[BoundTargetFunction]] via the Construct builtin.
__ LoadTaggedPointerField(
__ LoadTaggedField(
r3, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
@ -2727,8 +2720,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ JumpIfSmi(target, &non_constructor);
// Check if target has a [[Construct]] internal method.
__ LoadTaggedPointerField(map,
FieldMemOperand(target, HeapObject::kMapOffset));
__ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset));
{
Register flags = r4;
DCHECK(!AreAliased(argc, target, map, instance_type, flags));
@ -2811,13 +2803,12 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
Register scratch = r0;
Label allocate_vector, done;
__ LoadTaggedPointerField(
__ LoadTaggedField(
vector, FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kFeedbackVectorsOffset));
__ ShiftLeftU64(scratch, func_index, Operand(kTaggedSizeLog2));
__ AddS64(vector, vector, scratch);
__ LoadTaggedPointerField(vector,
FieldMemOperand(vector, FixedArray::kHeaderSize));
__ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize));
__ JumpIfSmi(vector, &allocate_vector);
__ bind(&done);
__ push(kWasmInstanceRegister);
@ -3504,16 +3495,16 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ push(receiver);
// Push data from AccessorInfo.
__ LoadAnyTaggedField(
scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset), r1);
__ LoadTaggedField(scratch,
FieldMemOperand(callback, AccessorInfo::kDataOffset), r1);
__ push(scratch);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ Push(scratch, scratch);
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
__ Push(scratch, holder);
__ Push(Smi::zero()); // should_throw_on_error -> false
__ LoadTaggedPointerField(
scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset), r1);
__ LoadTaggedField(scratch,
FieldMemOperand(callback, AccessorInfo::kNameOffset), r1);
__ push(scratch);
// v8::PropertyCallbackInfo::args_ array and name handle.
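Editor's note: every hunk in this file folds V8's split tagged loads (LoadTaggedPointerField for HeapObjects, LoadAnyTaggedField for HeapObject-or-Smi) into one LoadTaggedField; operands are unchanged at each call site. A minimal sketch of what the unified helper plausibly does under pointer compression -- signatures and helper names are assumptions modelled on declarations later in this commit, not the exact per-architecture bodies:

void MacroAssembler::LoadTaggedField(const Register& destination,
                                     const MemOperand& field_operand) {
  if (COMPRESS_POINTERS_BOOL) {
    // One decompression path now serves Smis and HeapObjects alike.
    DecompressTagged(destination, field_operand);
  } else {
    LoadU64(destination, field_operand);
  }
}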

View File

@ -61,6 +61,7 @@ extern runtime WasmStringViewWtf8Slice(
Context, ByteArray, Number, Number): String;
extern runtime WasmStringCompare(NoContext, String, String): Smi;
extern runtime WasmStringFromCodePoint(Context, Number): String;
extern runtime WasmStringHash(NoContext, String): Smi;
extern runtime WasmJSToWasmObject(Context, JSAny, Smi): JSAny;
}
@ -699,6 +700,10 @@ builtin ThrowWasmTrapArrayTooLarge(): JSAny {
tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapArrayTooLarge));
}
builtin ThrowWasmTrapStringOffsetOutOfBounds(): JSAny {
tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapStringOffsetOutOfBounds));
}
macro TryNumberToIntptr(value: JSAny): intptr labels Failure {
typeswitch (value) {
case (s: Smi): {
@ -939,6 +944,13 @@ builtin WasmStringNewWtf16Array(
}
}
// Contract: input is any string, output is a string that the TF operator
// "StringPrepareForGetCodeunit" can handle.
builtin WasmStringAsWtf16(str: String): String {
const cons = Cast<ConsString>(str) otherwise return str;
return Flatten(cons);
}
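Editor's note: as a standalone illustration of why WasmStringAsWtf16 flattens cons strings -- hypothetical Cons type, not V8's String hierarchy:

#include <string>

// A cons (rope) string is a tree of fragments, so indexing it needs a tree
// walk; flattening copies the fragments into one contiguous buffer so each
// code unit is addressable in O(1). That is the contract the builtin above
// establishes for the StringPrepareForGetCodeunit operator.
struct Cons {
  const Cons* left = nullptr;   // non-null for interior nodes
  const Cons* right = nullptr;
  std::u16string leaf;          // payload for leaf nodes
};

void Flatten(const Cons& node, std::u16string* out) {
  if (node.left == nullptr) {
    *out += node.leaf;
    return;
  }
  Flatten(*node.left, out);
  Flatten(*node.right, out);
}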
builtin WasmStringConst(index: uint32): String {
const instance = LoadInstanceFromFrame();
tail runtime::WasmStringConst(
@ -1254,6 +1266,11 @@ builtin WasmStringFromCodePoint(codePoint: uint32): String {
LoadContextFromFrame(), WasmUint32ToNumber(codePoint));
}
builtin WasmStringHash(string: String): int32 {
const result = runtime::WasmStringHash(kNoContext, string);
return SmiToInt32(result);
}
builtin WasmExternInternalize(externObject: JSAny): JSAny {
const instance = LoadInstanceFromFrame();
const context = LoadContextFromInstance(instance);

View File

@ -125,7 +125,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi,
TurboAssembler::kCountIncludesReceiver);
MacroAssembler::kCountIncludesReceiver);
__ ret(0);
@ -171,9 +171,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -----------------------------------
const TaggedRegister shared_function_info(rbx);
__ LoadTaggedPointerField(
shared_function_info,
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedField(shared_function_info,
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movl(rbx,
FieldOperand(shared_function_info, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(rbx);
@ -282,7 +281,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
__ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi,
TurboAssembler::kCountIncludesReceiver);
MacroAssembler::kCountIncludesReceiver);
__ ret(0);
// If the result is a smi, it is *not* an object in the ECMA sense.
@ -701,7 +700,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
__ CmpInstanceType(scratch1, INTERPRETER_DATA_TYPE);
__ j(not_equal, &done, Label::kNear);
__ LoadTaggedPointerField(
__ LoadTaggedField(
sfi_data, FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
__ bind(&done);
@ -729,9 +728,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r8 : no_reg;
// Load suspended function and context.
__ LoadTaggedPointerField(
rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
__ LoadTaggedField(rdi,
FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@ -768,12 +767,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -----------------------------------
// Copy the function arguments from the generator object's register file.
__ LoadTaggedPointerField(
rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedField(rcx,
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movzxwq(
rcx, FieldOperand(rcx, SharedFunctionInfo::kFormalParameterCountOffset));
__ decq(rcx); // Exclude receiver.
__ LoadTaggedPointerField(
__ LoadTaggedField(
rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset));
{
@ -781,24 +780,23 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&loop);
__ decq(rcx);
__ j(less, &done_loop, Label::kNear);
__ PushTaggedAnyField(
__ PushTaggedField(
FieldOperand(rbx, rcx, times_tagged_size, FixedArray::kHeaderSize),
decompr_scratch1);
__ jmp(&loop);
__ bind(&done_loop);
// Push the receiver.
__ PushTaggedPointerField(
FieldOperand(rdx, JSGeneratorObject::kReceiverOffset),
decompr_scratch1);
__ PushTaggedField(FieldOperand(rdx, JSGeneratorObject::kReceiverOffset),
decompr_scratch1);
}
// Underlying function needs to have bytecode available.
if (v8_flags.debug_code) {
Label is_baseline, ok;
__ LoadTaggedPointerField(
__ LoadTaggedField(
rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
__ LoadTaggedField(
rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecodeOrBaseline(masm, rcx, kScratchRegister,
&is_baseline);
@ -816,7 +814,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
__ PushReturnAddressFrom(rax);
__ LoadTaggedPointerField(
__ LoadTaggedField(
rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movzxwq(rax, FieldOperand(
rax, SharedFunctionInfo::kFormalParameterCountOffset));
@ -824,7 +822,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
__ LoadTaggedField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
__ JumpCodeObject(rcx);
}
@ -837,8 +835,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(rdx);
__ LoadTaggedPointerField(
rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedField(rdi,
FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
}
__ jmp(&stepping_prepared);
@ -848,8 +846,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(rdx);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(rdx);
__ LoadTaggedPointerField(
rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedField(rdi,
FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
}
__ jmp(&stepping_prepared);
@ -890,8 +888,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ leave();
// Drop receiver + arguments.
__ DropArguments(params_size, scratch2, TurboAssembler::kCountIsBytes,
TurboAssembler::kCountIncludesReceiver);
__ DropArguments(params_size, scratch2, MacroAssembler::kCountIsBytes,
MacroAssembler::kCountIncludesReceiver);
}
// Tail-call |function_id| if |actual_state| == |expected_state|
@ -1019,13 +1017,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
const TaggedRegister shared_function_info(kScratchRegister);
__ LoadTaggedPointerField(
__ LoadTaggedField(
shared_function_info,
FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
kInterpreterBytecodeArrayRegister,
FieldOperand(shared_function_info,
SharedFunctionInfo::kFunctionDataOffset));
__ LoadTaggedField(kInterpreterBytecodeArrayRegister,
FieldOperand(shared_function_info,
SharedFunctionInfo::kFunctionDataOffset));
Label is_baseline;
GetSharedFunctionInfoBytecodeOrBaseline(
@ -1040,10 +1037,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// Load the feedback vector from the closure.
TaggedRegister feedback_cell(feedback_vector);
__ LoadTaggedPointerField(
feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(feedback_vector,
FieldOperand(feedback_cell, Cell::kValueOffset));
__ LoadTaggedField(feedback_cell,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedField(feedback_vector,
FieldOperand(feedback_cell, Cell::kValueOffset));
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
@ -1220,10 +1217,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(
{
// Load the feedback vector from the closure.
TaggedRegister feedback_cell(feedback_vector);
__ LoadTaggedPointerField(
feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(feedback_vector,
FieldOperand(feedback_cell, Cell::kValueOffset));
__ LoadTaggedField(feedback_cell,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedField(feedback_vector,
FieldOperand(feedback_cell, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
@ -1265,7 +1262,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
kSystemPointerSize));
// Push the arguments.
__ PushArray(start_address, num_args, scratch,
TurboAssembler::PushArrayOrder::kReverse);
MacroAssembler::PushArrayOrder::kReverse);
}
// static
@ -1417,16 +1414,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// trampoline.
__ movq(rbx, Operand(rbp, StandardFrameConstants::kFunctionOffset));
const TaggedRegister shared_function_info(rbx);
__ LoadTaggedPointerField(
shared_function_info,
FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
rbx, FieldOperand(shared_function_info,
SharedFunctionInfo::kFunctionDataOffset));
__ LoadTaggedField(shared_function_info,
FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedField(rbx,
FieldOperand(shared_function_info,
SharedFunctionInfo::kFunctionDataOffset));
__ CmpObjectType(rbx, INTERPRETER_DATA_TYPE, kScratchRegister);
__ j(not_equal, &builtin_trampoline, Label::kNear);
__ LoadTaggedPointerField(
__ LoadTaggedField(
rbx, FieldOperand(rbx, InterpreterData::kInterpreterTrampolineOffset));
__ LoadCodeEntry(rbx, rbx);
__ jmp(&trampoline_loaded, Label::kNear);
@ -1555,10 +1551,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
BaselineOutOfLinePrologueDescriptor::kClosure);
// Load the feedback vector from the closure.
TaggedRegister feedback_cell(feedback_vector);
__ LoadTaggedPointerField(
feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(feedback_vector,
FieldOperand(feedback_cell, Cell::kValueOffset));
__ LoadTaggedField(feedback_cell,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedField(feedback_vector,
FieldOperand(feedback_cell, Cell::kValueOffset));
__ AssertFeedbackVector(feedback_vector);
// Check the tiering state.
@ -1814,8 +1810,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
}
__ bind(&no_this_arg);
__ DropArgumentsAndPushNewReceiver(rax, rdx, rcx,
TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@ -1919,8 +1915,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ movq(rbx, args[3]); // argumentsList
__ bind(&done);
__ DropArgumentsAndPushNewReceiver(rax, rdx, rcx,
TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@ -1971,8 +1967,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ bind(&done);
__ DropArgumentsAndPushNewReceiver(
rax, masm->RootAsOperand(RootIndex::kUndefinedValue), rcx,
TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@ -2097,8 +2093,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ cmpl(current, num);
__ j(equal, &done, Label::kNear);
// Turn the hole into undefined as we go.
__ LoadAnyTaggedField(value, FieldOperand(src, current, times_tagged_size,
FixedArray::kHeaderSize));
__ LoadTaggedField(value, FieldOperand(src, current, times_tagged_size,
FixedArray::kHeaderSize));
__ CompareRoot(value, RootIndex::kTheHoleValue);
__ j(not_equal, &push, Label::kNear);
__ LoadRoot(value, RootIndex::kUndefinedValue);
@ -2213,8 +2209,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
StackArgumentsAccessor args(rax);
__ AssertCallableFunction(rdi);
__ LoadTaggedPointerField(
rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedField(rdx,
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
// ----------- S t a t e -------------
// -- rax : the number of arguments
// -- rdx : the shared function info.
@ -2224,7 +2220,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
__ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
__ LoadTaggedField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ testl(FieldOperand(rdx, SharedFunctionInfo::kFlagsOffset),
@ -2281,7 +2277,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(rax);
__ SmiUntagUnsigned(rax);
}
__ LoadTaggedPointerField(
__ LoadTaggedField(
rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
@ -2312,8 +2308,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Load [[BoundArguments]] into rcx and length of that into rbx.
Label no_bound_arguments;
__ LoadTaggedPointerField(
rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
__ LoadTaggedField(rcx,
FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
__ SmiUntagFieldUnsigned(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ testl(rbx, rbx);
__ j(zero, &no_bound_arguments);
@ -2354,7 +2350,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Push [[BoundArguments]] to the stack.
{
Label loop;
__ LoadTaggedPointerField(
__ LoadTaggedField(
rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
__ SmiUntagFieldUnsigned(rbx,
FieldOperand(rcx, FixedArray::kLengthOffset));
@ -2364,9 +2360,9 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// offset in order to be able to move decl(rbx) right before the loop
// condition. This is necessary in order to avoid flags corruption by
// pointer decompression code.
__ LoadAnyTaggedField(
r12, FieldOperand(rcx, rbx, times_tagged_size,
FixedArray::kHeaderSize - kTaggedSize));
__ LoadTaggedField(r12,
FieldOperand(rcx, rbx, times_tagged_size,
FixedArray::kHeaderSize - kTaggedSize));
__ Push(r12);
__ decl(rbx);
__ j(greater, &loop);
@ -2391,15 +2387,14 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// Patch the receiver to [[BoundThis]].
StackArgumentsAccessor args(rax);
__ LoadAnyTaggedField(rbx,
FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
__ LoadTaggedField(rbx, FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
__ movq(args.GetReceiverOperand(), rbx);
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
__ LoadTaggedPointerField(
__ LoadTaggedField(
rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
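Editor's note: the hunks around Generate_PushBoundArguments and Generate_CallBoundFunctionImpl all sit on the bound-function call path. A hedged high-level model of what that assembly implements, using plain C++ stand-ins rather than V8 types:

#include <functional>
#include <vector>

// Model only: patch the receiver to [[BoundThis]], push [[BoundArguments]]
// ahead of the call-site arguments, then tail-call [[BoundTargetFunction]],
// the same order in which the builtin fills stack slots.
struct BoundFunction {
  std::function<int(int, const std::vector<int>&)> bound_target_function;
  int bound_this = 0;
  std::vector<int> bound_arguments;

  int Call(const std::vector<int>& call_args) const {
    std::vector<int> args = bound_arguments;         // bound args first
    args.insert(args.end(), call_args.begin(), call_args.end());
    return bound_target_function(bound_this, args);  // receiver patched
  }
};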
@ -2498,9 +2493,8 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
const TaggedRegister shared_function_info(rcx);
__ LoadTaggedPointerField(
shared_function_info,
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedField(shared_function_info,
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testl(FieldOperand(shared_function_info, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
__ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
@ -2528,13 +2522,13 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
Label done;
__ cmpq(rdi, rdx);
__ j(not_equal, &done, Label::kNear);
__ LoadTaggedPointerField(
__ LoadTaggedField(
rdx, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
__ bind(&done);
}
// Construct the [[BoundTargetFunction]] via the Construct builtin.
__ LoadTaggedPointerField(
__ LoadTaggedField(
rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
@ -2677,7 +2671,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
// Load deoptimization data from the code object.
const TaggedRegister deopt_data(rbx);
__ LoadTaggedPointerField(
__ LoadTaggedField(
deopt_data,
FieldOperand(
rax, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));
@ -2776,12 +2770,11 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
__ Push(rbp);
__ Move(rbp, rsp);
__ Push(Immediate(StackFrame::TypeToMarker(StackFrame::WASM)));
__ LoadTaggedPointerField(
vector, FieldOperand(kWasmInstanceRegister,
WasmInstanceObject::kFeedbackVectorsOffset));
__ LoadTaggedPointerField(vector,
FieldOperand(vector, func_index, times_tagged_size,
FixedArray::kHeaderSize));
__ LoadTaggedField(vector,
FieldOperand(kWasmInstanceRegister,
WasmInstanceObject::kFeedbackVectorsOffset));
__ LoadTaggedField(vector, FieldOperand(vector, func_index, times_tagged_size,
FixedArray::kHeaderSize));
Label allocate_vector, done;
__ JumpIfSmi(vector, &allocate_vector);
__ bind(&done);
@ -2931,7 +2924,7 @@ void PrepareForBuiltinCall(MacroAssembler* masm, MemOperand GCScanSlotPlace,
__ pushq(function_data);
// We had to prepare the parameters for the Call: we have to put the context
// into rsi.
__ LoadAnyTaggedField(
__ LoadTaggedField(
rsi,
MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
WasmInstanceObject::kNativeContextOffset)));
@ -3012,7 +3005,7 @@ void AllocateSuspender(MacroAssembler* masm, Register function_data,
__ Move(GCScanSlotPlace, 2);
__ Push(wasm_instance);
__ Push(function_data);
__ LoadAnyTaggedField(
__ LoadTaggedField(
kContextRegister,
MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
WasmInstanceObject::kNativeContextOffset)));
@ -3052,7 +3045,7 @@ void ReloadParentContinuation(MacroAssembler* masm, Register wasm_instance,
wasm::JumpBuffer::Retired);
Register parent = tmp2;
__ LoadAnyTaggedField(
__ LoadTaggedField(
parent,
FieldOperand(active_continuation, WasmContinuationObject::kParentOffset));
@ -3083,7 +3076,7 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1,
__ StoreTaggedSignedField(
FieldOperand(suspender, WasmSuspenderObject::kStateOffset),
Smi::FromInt(WasmSuspenderObject::kInactive));
__ LoadAnyTaggedField(
__ LoadTaggedField(
suspender, FieldOperand(suspender, WasmSuspenderObject::kParentOffset));
__ CompareRoot(suspender, RootIndex::kUndefinedValue);
Label undefined;
@ -3111,19 +3104,19 @@ void LoadFunctionDataAndWasmInstance(MacroAssembler* masm,
Register wasm_instance) {
Register closure = function_data;
Register shared_function_info = closure;
__ LoadAnyTaggedField(
__ LoadTaggedField(
shared_function_info,
MemOperand(
closure,
wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()));
closure = no_reg;
__ LoadAnyTaggedField(
__ LoadTaggedField(
function_data,
MemOperand(shared_function_info,
SharedFunctionInfo::kFunctionDataOffset - kHeapObjectTag));
shared_function_info = no_reg;
__ LoadAnyTaggedField(
__ LoadTaggedField(
wasm_instance,
MemOperand(function_data,
WasmExportedFunctionData::kInstanceOffset - kHeapObjectTag));
@ -3224,7 +3217,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
Register suspender = rax; // Fixed.
__ movq(MemOperand(rbp, kSuspenderOffset), suspender);
Register target_continuation = rax;
__ LoadAnyTaggedField(
__ LoadTaggedField(
target_continuation,
FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset));
suspender = no_reg;
@ -3728,7 +3721,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
Register function_entry = function_data;
Register scratch = r12;
__ LoadAnyTaggedField(
__ LoadTaggedField(
function_entry,
FieldOperand(function_data, WasmExportedFunctionData::kInternalOffset));
__ LoadExternalPointerField(
@ -3812,8 +3805,8 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
// expected to be on the top of the stack).
// We cannot use just the ret instruction for this, because we cannot pass the
// number of slots to remove in a Register as an argument.
__ DropArguments(param_count, rbx, TurboAssembler::kCountIsInteger,
TurboAssembler::kCountExcludesReceiver);
__ DropArguments(param_count, rbx, MacroAssembler::kCountIsInteger,
MacroAssembler::kCountExcludesReceiver);
__ ret(0);
// --------------------------------------------------------------------------
@ -4081,7 +4074,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
// live: [rax, rbx, rcx]
Register suspender_continuation = rdx;
__ LoadAnyTaggedField(
__ LoadTaggedField(
suspender_continuation,
FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset));
#ifdef DEBUG
@ -4102,12 +4095,12 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
// Update roots.
// -------------------------------------------
Register caller = rcx;
__ LoadAnyTaggedField(caller,
FieldOperand(suspender_continuation,
WasmContinuationObject::kParentOffset));
__ LoadTaggedField(caller,
FieldOperand(suspender_continuation,
WasmContinuationObject::kParentOffset));
__ movq(masm->RootAsOperand(RootIndex::kActiveContinuation), caller);
Register parent = rdx;
__ LoadAnyTaggedField(
__ LoadTaggedField(
parent, FieldOperand(suspender, WasmSuspenderObject::kParentOffset));
__ movq(masm->RootAsOperand(RootIndex::kActiveSuspender), parent);
parent = no_reg;
@ -4172,19 +4165,19 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
// Load suspender from closure.
// -------------------------------------------
Register sfi = closure;
__ LoadAnyTaggedField(
__ LoadTaggedField(
sfi,
MemOperand(
closure,
wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()));
Register function_data = sfi;
__ LoadAnyTaggedField(
__ LoadTaggedField(
function_data,
FieldOperand(sfi, SharedFunctionInfo::kFunctionDataOffset));
// The write barrier uses a fixed register for the host object (rdi). The next
// barrier is on the suspender, so load it in rdi directly.
Register suspender = rdi;
__ LoadAnyTaggedField(
__ LoadTaggedField(
suspender, FieldOperand(function_data, WasmResumeData::kSuspenderOffset));
// Check the suspender state.
Label suspender_is_suspended;
@ -4233,7 +4226,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
__ movq(masm->RootAsOperand(RootIndex::kActiveSuspender), suspender);
Register target_continuation = suspender;
__ LoadAnyTaggedField(
__ LoadTaggedField(
target_continuation,
FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset));
suspender = no_reg;
@ -4848,16 +4841,16 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
// Insert additional parameters into the stack frame above return address.
__ PopReturnAddressTo(scratch);
__ Push(receiver);
__ PushTaggedAnyField(FieldOperand(callback, AccessorInfo::kDataOffset),
decompr_scratch1);
__ PushTaggedField(FieldOperand(callback, AccessorInfo::kDataOffset),
decompr_scratch1);
__ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
__ Push(kScratchRegister); // return value
__ Push(kScratchRegister); // return value default
__ PushAddress(ExternalReference::isolate_address(masm->isolate()));
__ Push(holder);
__ Push(Smi::zero()); // should_throw_on_error -> false
__ PushTaggedPointerField(FieldOperand(callback, AccessorInfo::kNameOffset),
decompr_scratch1);
__ PushTaggedField(FieldOperand(callback, AccessorInfo::kNameOffset),
decompr_scratch1);
__ PushReturnAddressFrom(scratch);
// v8::PropertyCallbackInfo::args_ array and name handle.
@ -5129,12 +5122,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Get the InstructionStream object from the shared function info.
Register code_obj = rbx;
TaggedRegister shared_function_info(code_obj);
__ LoadTaggedPointerField(
__ LoadTaggedField(
shared_function_info,
FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
code_obj, FieldOperand(shared_function_info,
SharedFunctionInfo::kFunctionDataOffset));
__ LoadTaggedField(code_obj,
FieldOperand(shared_function_info,
SharedFunctionInfo::kFunctionDataOffset));
// Check if we have baseline code. For OSR entry it is safe to assume we
// always have baseline code.
@ -5166,10 +5159,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
Register feedback_vector = r11;
TaggedRegister feedback_cell(feedback_vector);
__ LoadTaggedPointerField(
feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(feedback_vector,
FieldOperand(feedback_cell, Cell::kValueOffset));
__ LoadTaggedField(feedback_cell,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedField(feedback_vector,
FieldOperand(feedback_cell, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to

View File

@ -1435,7 +1435,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
private:
friend class Assembler;
friend class TurboAssembler;
friend class MacroAssembler;
template <typename T>
bool CanAcquireVfp() const;

File diff suppressed because it is too large

View File

@ -43,9 +43,9 @@ enum TargetAddressStorageMode {
NEVER_INLINE_TARGET_ADDRESS
};
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
using MacroAssemblerBase::MacroAssemblerBase;
// Activation support.
void EnterFrame(StackFrame::Type type,
@ -596,49 +596,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void F64x2ConvertLowI32x4U(QwNeonRegister dst, QwNeonRegister src);
void F64x2PromoteLowF32x4(QwNeonRegister dst, QwNeonRegister src);
private:
// Compare single values and then load the fpscr flags to a register.
void VFPCompareAndLoadFlags(const SwVfpRegister src1,
const SwVfpRegister src2,
const Register fpscr_flags,
const Condition cond = al);
void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2,
const Register fpscr_flags,
const Condition cond = al);
// Compare double values and then load the fpscr flags to a register.
void VFPCompareAndLoadFlags(const DwVfpRegister src1,
const DwVfpRegister src2,
const Register fpscr_flags,
const Condition cond = al);
void VFPCompareAndLoadFlags(const DwVfpRegister src1, const double src2,
const Register fpscr_flags,
const Condition cond = al);
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
// Implementation helpers for FloatMin and FloatMax.
template <typename T>
void FloatMaxHelper(T result, T left, T right, Label* out_of_line);
template <typename T>
void FloatMinHelper(T result, T left, T right, Label* out_of_line);
template <typename T>
void FloatMaxOutOfLineHelper(T result, T left, T right);
template <typename T>
void FloatMinOutOfLineHelper(T result, T left, T right);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
};
// MacroAssembler implements a collection of frequently used macros.
class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
using TurboAssembler::TurboAssembler;
void Mls(Register dst, Register src1, Register src2, Register srcA,
Condition cond = al);
void And(Register dst, Register src1, const Operand& src2,
@ -899,6 +856,42 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register actual_parameter_count, Label* done,
InvokeType type);
// Compare single values and then load the fpscr flags to a register.
void VFPCompareAndLoadFlags(const SwVfpRegister src1,
const SwVfpRegister src2,
const Register fpscr_flags,
const Condition cond = al);
void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2,
const Register fpscr_flags,
const Condition cond = al);
// Compare double values and then load the fpscr flags to a register.
void VFPCompareAndLoadFlags(const DwVfpRegister src1,
const DwVfpRegister src2,
const Register fpscr_flags,
const Condition cond = al);
void VFPCompareAndLoadFlags(const DwVfpRegister src1, const double src2,
const Register fpscr_flags,
const Condition cond = al);
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
// Implementation helpers for FloatMin and FloatMax.
template <typename T>
void FloatMaxHelper(T result, T left, T right, Label* out_of_line);
template <typename T>
void FloatMinHelper(T result, T left, T right, Label* out_of_line);
template <typename T>
void FloatMaxOutOfLineHelper(T result, T left, T right);
template <typename T>
void FloatMinOutOfLineHelper(T result, T left, T right);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
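Editor's note: this header shows the shape of the whole refactor: the TurboAssembler layer is dissolved and its members (the VFPCompareAndLoadFlags overloads, the FloatMin/FloatMax helpers, CallCFunctionHelper, and so on) move into MacroAssembler itself. Schematically, and consistent with the turbo-assembler -> macro-assembler-base file rename elsewhere in this commit (a sketch, not the verbatim headers):

// Before: Assembler -> TurboAssemblerBase -> TurboAssembler -> MacroAssembler
// After:  Assembler -> MacroAssemblerBase -> MacroAssembler
class MacroAssemblerBase : public Assembler { /* shared plumbing */ };

class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
 public:
  using MacroAssemblerBase::MacroAssemblerBase;  // inherit constructors
  // ...former TurboAssembler members merged with the old MacroAssembler...
};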

View File

@ -659,8 +659,8 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
Tagged_t compressed =
Assembler::target_compressed_address_at(pc_, constant_pool_);
DCHECK(!HAS_SMI_TAG(compressed));
Object obj(V8HeapCompressionScheme::DecompressTaggedPointer(cage_base,
compressed));
Object obj(
V8HeapCompressionScheme::DecompressTagged(cage_base, compressed));
// Embedding of compressed InstructionStream objects must not happen when
// external code space is enabled, because Code objects must be used
// instead.
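Editor's note: DecompressTagged here subsumes the old DecompressTaggedPointer. A simplified standalone model of the scheme, assuming the usual pointer-compression layout (the real logic lives in V8HeapCompressionScheme):

#include <cstdint>

using Address  = uintptr_t;  // stand-ins for V8's Address/Tagged_t
using Tagged_t = uint32_t;

// A compressed tagged value is a 32-bit offset into the pointer-compression
// cage; decompression adds the cage base back. With one unified tagged
// representation there is no separate "pointer" flavour any more.
Address DecompressTagged(Address cage_base, Tagged_t compressed) {
  return cage_base + static_cast<Address>(compressed);
}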

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -146,9 +146,9 @@ enum PreShiftImmMode {
// platforms are updated.
enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
using MacroAssemblerBase::MacroAssemblerBase;
#if DEBUG
void set_allow_macro_instructions(bool value) {
@ -1400,14 +1400,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// ---------------------------------------------------------------------------
// Pointer compression Support
// Loads a field containing a HeapObject and decompresses it if pointer
// compression is enabled.
void LoadTaggedPointerField(const Register& destination,
const MemOperand& field_operand);
// Loads a field containing any tagged value and decompresses it if necessary.
void LoadAnyTaggedField(const Register& destination,
const MemOperand& field_operand);
void LoadTaggedField(const Register& destination,
const MemOperand& field_operand);
// Loads a field containing a tagged signed value and decompresses it if
// necessary.
@ -1432,24 +1427,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand);
void DecompressTaggedPointer(const Register& destination,
const MemOperand& field_operand);
void DecompressTaggedPointer(const Register& destination,
const Register& source);
void DecompressTaggedPointer(const Register& destination, Tagged_t immediate);
void DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand);
void DecompressTagged(const Register& destination,
const MemOperand& field_operand);
void DecompressTagged(const Register& destination, const Register& source);
void DecompressTagged(const Register& destination, Tagged_t immediate);
void AtomicDecompressTaggedSigned(const Register& destination,
const Register& base, const Register& index,
const Register& temp);
void AtomicDecompressTaggedPointer(const Register& destination,
const Register& base,
const Register& index,
const Register& temp);
void AtomicDecompressAnyTagged(const Register& destination,
const Register& base, const Register& index,
const Register& temp);
void AtomicDecompressTagged(const Register& destination, const Register& base,
const Register& index, const Register& temp);
// Restore FP and LR from the values stored in the current frame. This will
// authenticate the LR when pointer authentication is enabled.
@ -1484,81 +1471,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
ExternalPointerTag tag,
Register isolate_root = Register::no_reg());
protected:
// The actual Push and Pop implementations. These don't generate any code
// other than that required for the push or pop. This allows
// (Push|Pop)CPURegList to bundle together run-time assertions for a large
// block of registers.
//
// Note that size is per register, and is specified in bytes.
void PushHelper(int count, int size, const CPURegister& src0,
const CPURegister& src1, const CPURegister& src2,
const CPURegister& src3);
void PopHelper(int count, int size, const CPURegister& dst0,
const CPURegister& dst1, const CPURegister& dst2,
const CPURegister& dst3);
void ConditionalCompareMacro(const Register& rn, const Operand& operand,
StatusFlags nzcv, Condition cond,
ConditionalCompareOp op);
void AddSubWithCarryMacro(const Register& rd, const Register& rn,
const Operand& operand, FlagsUpdate S,
AddSubWithCarryOp op);
// Call Printf. On a native build, a simple call will be generated, but if the
// simulator is being used then a suitable pseudo-instruction is used. The
// arguments and stack must be prepared by the caller as for a normal AAPCS64
// call to 'printf'.
//
// The 'args' argument should point to an array of variable arguments in their
// proper PCS registers (and in calling order). The argument registers can
// have mixed types. The format string (x0) should not be included.
void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr);
private:
#if DEBUG
// Tell whether any of the macro instructions can be used. When false the
// MacroAssembler will assert if a method which can emit a variable number
// of instructions is called.
bool allow_macro_instructions_ = true;
#endif
// Scratch registers available for use by the MacroAssembler.
CPURegList tmp_list_ = DefaultTmpList();
CPURegList fptmp_list_ = DefaultFPTmpList();
// Helps resolve branching to labels potentially out of range.
// If the label is not bound, it registers the information necessary to later
// be able to emit a veneer for this branch if necessary.
// If the label is bound, it returns true if the label (or the previous link
// in the label chain) is out of range. In that case the caller is responsible
// for generating appropriate code.
// Otherwise it returns false.
// This function also checks whether veneers need to be emitted.
bool NeedExtraInstructionsOrRegisterBranch(Label* label,
ImmBranchType branch_type);
void Movi16bitHelper(const VRegister& vd, uint64_t imm);
void Movi32bitHelper(const VRegister& vd, uint64_t imm);
void Movi64bitHelper(const VRegister& vd, uint64_t imm);
void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr,
LoadStoreOp op);
void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& addr, LoadStorePairOp op);
int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
byte* pc);
void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al);
};
class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
using TurboAssembler::TurboAssembler;
// Instruction set functions ------------------------------------------------
// Logical macros.
inline void Bics(const Register& rd, const Register& rn,
@ -1594,18 +1506,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Condition cond);
inline void Extr(const Register& rd, const Register& rn, const Register& rm,
unsigned lsb);
void Fcvtl(const VRegister& vd, const VRegister& vn) {
DCHECK(allow_macro_instructions());
fcvtl(vd, vn);
}
void Fcvtl2(const VRegister& vd, const VRegister& vn) {
DCHECK(allow_macro_instructions());
fcvtl2(vd, vn);
}
void Fcvtn(const VRegister& vd, const VRegister& vn) {
DCHECK(allow_macro_instructions());
fcvtn(vd, vn);
}
void Fcvtn2(const VRegister& vd, const VRegister& vn) {
DCHECK(allow_macro_instructions());
fcvtn2(vd, vn);
@ -1641,7 +1545,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DCHECK(allow_macro_instructions());
mvni(vd, imm8, shift, shift_amount);
}
inline void Rev(const Register& rd, const Register& rn);
inline void Smaddl(const Register& rd, const Register& rn, const Register& rm,
const Register& ra);
inline void Smsubl(const Register& rd, const Register& rn, const Register& rm,
@ -2139,6 +2042,76 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register feedback_vector, FeedbackSlot slot,
Label* on_result, Label::Distance distance);
protected:
// The actual Push and Pop implementations. These don't generate any code
// other than that required for the push or pop. This allows
// (Push|Pop)CPURegList to bundle together run-time assertions for a large
// block of registers.
//
// Note that size is per register, and is specified in bytes.
void PushHelper(int count, int size, const CPURegister& src0,
const CPURegister& src1, const CPURegister& src2,
const CPURegister& src3);
void PopHelper(int count, int size, const CPURegister& dst0,
const CPURegister& dst1, const CPURegister& dst2,
const CPURegister& dst3);
void ConditionalCompareMacro(const Register& rn, const Operand& operand,
StatusFlags nzcv, Condition cond,
ConditionalCompareOp op);
void AddSubWithCarryMacro(const Register& rd, const Register& rn,
const Operand& operand, FlagsUpdate S,
AddSubWithCarryOp op);
// Call Printf. On a native build, a simple call will be generated, but if the
// simulator is being used then a suitable pseudo-instruction is used. The
// arguments and stack must be prepared by the caller as for a normal AAPCS64
// call to 'printf'.
//
// The 'args' argument should point to an array of variable arguments in their
// proper PCS registers (and in calling order). The argument registers can
// have mixed types. The format string (x0) should not be included.
void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr);
private:
#if DEBUG
// Tell whether any of the macro instructions can be used. When false the
// MacroAssembler will assert if a method which can emit a variable number
// of instructions is called.
bool allow_macro_instructions_ = true;
#endif
// Scratch registers available for use by the MacroAssembler.
CPURegList tmp_list_ = DefaultTmpList();
CPURegList fptmp_list_ = DefaultFPTmpList();
// Helps resolve branching to labels potentially out of range.
// If the label is not bound, it registers the information necessary to later
// be able to emit a veneer for this branch if necessary.
// If the label is bound, it returns true if the label (or the previous link
// in the label chain) is out of range. In that case the caller is responsible
// for generating appropriate code.
// Otherwise it returns false.
// This function also checks whether veneers need to be emitted.
bool NeedExtraInstructionsOrRegisterBranch(Label* label,
ImmBranchType branch_type);
void Movi16bitHelper(const VRegister& vd, uint64_t imm);
void Movi32bitHelper(const VRegister& vd, uint64_t imm);
void Movi64bitHelper(const VRegister& vd, uint64_t imm);
void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr,
LoadStoreOp op);
void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& addr, LoadStorePairOp op);
int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
byte* pc);
void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al);
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
@ -2148,38 +2121,38 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// emitted is what you specified when creating the scope.
class V8_NODISCARD InstructionAccurateScope {
public:
explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0)
: tasm_(tasm),
block_pool_(tasm, count * kInstrSize)
explicit InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
: masm_(masm),
block_pool_(masm, count * kInstrSize)
#ifdef DEBUG
,
size_(count * kInstrSize)
#endif
{
tasm_->CheckVeneerPool(false, true, count * kInstrSize);
tasm_->StartBlockVeneerPool();
masm_->CheckVeneerPool(false, true, count * kInstrSize);
masm_->StartBlockVeneerPool();
#ifdef DEBUG
if (count != 0) {
tasm_->bind(&start_);
masm_->bind(&start_);
}
previous_allow_macro_instructions_ = tasm_->allow_macro_instructions();
tasm_->set_allow_macro_instructions(false);
previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
masm_->set_allow_macro_instructions(false);
#endif
}
~InstructionAccurateScope() {
tasm_->EndBlockVeneerPool();
masm_->EndBlockVeneerPool();
#ifdef DEBUG
if (start_.is_bound()) {
DCHECK(tasm_->SizeOfCodeGeneratedSince(&start_) == size_);
DCHECK(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
}
tasm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
#endif
}
private:
TurboAssembler* tasm_;
TurboAssembler::BlockConstPoolScope block_pool_;
MacroAssembler* masm_;
MacroAssembler::BlockConstPoolScope block_pool_;
#ifdef DEBUG
size_t size_;
Label start_;
@ -2188,7 +2161,7 @@ class V8_NODISCARD InstructionAccurateScope {
};
// This scope utility allows scratch registers to be managed safely. The
// TurboAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
// registers. These registers can be allocated on demand, and will be returned
// at the end of the scope.
//
@ -2198,9 +2171,9 @@ class V8_NODISCARD InstructionAccurateScope {
// order as the constructors. We do not have assertions for this.
class V8_NODISCARD UseScratchRegisterScope {
public:
explicit UseScratchRegisterScope(TurboAssembler* tasm)
: available_(tasm->TmpList()),
availablefp_(tasm->FPTmpList()),
explicit UseScratchRegisterScope(MacroAssembler* masm)
: available_(masm->TmpList()),
availablefp_(masm->FPTmpList()),
old_available_(available_->bits()),
old_availablefp_(availablefp_->bits()) {
DCHECK_EQ(available_->type(), CPURegister::kRegister);
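Editor's note: only the constructor parameter changes from TurboAssembler* to MacroAssembler*; the usage pattern is unchanged. A typical sketch, assuming the arm64 AcquireX() helper:

// Scratch registers are borrowed from masm->TmpList() and handed back
// automatically when the scope is destroyed.
void EmitSomething(MacroAssembler* masm) {
  UseScratchRegisterScope temps(masm);
  Register scratch = temps.AcquireX();
  // ... emit code that clobbers `scratch` ...
}  // `scratch` returns to the pool here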

View File

@ -166,7 +166,6 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap) \
V(Function_string, function_string, FunctionString) \
V(function_to_string, function_to_string, FunctionToString) \
V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap) \
V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \
V(Infinity_string, Infinity_string, InfinityString) \
V(is_concat_spreadable_symbol, is_concat_spreadable_symbol, \

View File

@ -21,11 +21,11 @@
#include "src/codegen/ia32/register-ia32.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/label.h"
#include "src/codegen/macro-assembler-base.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register.h"
#include "src/codegen/reglist.h"
#include "src/codegen/reloc-info.h"
#include "src/codegen/turbo-assembler.h"
#include "src/common/globals.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
@ -77,18 +77,18 @@ Operand StackArgumentsAccessor::GetArgumentOperand(int index) const {
// -------------------------------------------------------------------------
// MacroAssembler implementation.
void TurboAssembler::InitializeRootRegister() {
void MacroAssembler::InitializeRootRegister() {
ASM_CODE_COMMENT(this);
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
Move(kRootRegister, Immediate(isolate_root));
}
Operand TurboAssembler::RootAsOperand(RootIndex index) {
Operand MacroAssembler::RootAsOperand(RootIndex index) {
DCHECK(root_array_available());
return Operand(kRootRegister, RootRegisterOffsetForRootIndex(index));
}
void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
void MacroAssembler::LoadRoot(Register destination, RootIndex index) {
ASM_CODE_COMMENT(this);
if (root_array_available()) {
mov(destination, RootAsOperand(index));
@ -113,7 +113,7 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
mov(destination, Operand(destination, RootRegisterOffsetForRootIndex(index)));
}
void TurboAssembler::CompareRoot(Register with, Register scratch,
void MacroAssembler::CompareRoot(Register with, Register scratch,
RootIndex index) {
ASM_CODE_COMMENT(this);
if (root_array_available()) {
@ -126,7 +126,7 @@ void TurboAssembler::CompareRoot(Register with, Register scratch,
}
}
void TurboAssembler::CompareRoot(Register with, RootIndex index) {
void MacroAssembler::CompareRoot(Register with, RootIndex index) {
ASM_CODE_COMMENT(this);
if (root_array_available()) {
cmp(with, RootAsOperand(index));
@ -180,7 +180,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
j(below_equal, on_in_range, near_jump);
}
void TurboAssembler::PushArray(Register array, Register size, Register scratch,
void MacroAssembler::PushArray(Register array, Register size, Register scratch,
PushArrayOrder order) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(array, size, scratch));
@ -206,7 +206,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
}
}
Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
Operand MacroAssembler::ExternalReferenceAsOperand(ExternalReference reference,
Register scratch) {
if (root_array_available() && options().enable_root_relative_access) {
intptr_t delta =
@ -233,8 +233,8 @@ Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
}
// TODO(v8:6666): If possible, refactor into a platform-independent function in
// TurboAssembler.
Operand TurboAssembler::ExternalReferenceAddressAsOperand(
// MacroAssembler.
Operand MacroAssembler::ExternalReferenceAddressAsOperand(
ExternalReference reference) {
DCHECK(root_array_available());
DCHECK(options().isolate_independent_code);
@ -244,8 +244,8 @@ Operand TurboAssembler::ExternalReferenceAddressAsOperand(
}
// TODO(v8:6666): If possible, refactor into a platform-independent function in
// TurboAssembler.
Operand TurboAssembler::HeapObjectAsOperand(Handle<HeapObject> object) {
// MacroAssembler.
Operand MacroAssembler::HeapObjectAsOperand(Handle<HeapObject> object) {
DCHECK(root_array_available());
Builtin builtin;
@ -264,7 +264,7 @@ Operand TurboAssembler::HeapObjectAsOperand(Handle<HeapObject> object) {
}
}
void TurboAssembler::LoadFromConstantsTable(Register destination,
void MacroAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
ASM_CODE_COMMENT(this);
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
@ -273,7 +273,7 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
FieldOperand(destination, FixedArray::OffsetOfElementAt(constant_index)));
}
void TurboAssembler::LoadRootRegisterOffset(Register destination,
void MacroAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
ASM_CODE_COMMENT(this);
DCHECK(is_int32(offset));
@ -285,13 +285,13 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
}
}
void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
ASM_CODE_COMMENT(this);
DCHECK(root_array_available());
mov(destination, Operand(kRootRegister, offset));
}
void TurboAssembler::LoadAddress(Register destination,
void MacroAssembler::LoadAddress(Register destination,
ExternalReference source) {
// TODO(jgruber): Add support for enable_root_relative_access.
if (root_array_available() && options().isolate_independent_code) {
@ -301,7 +301,7 @@ void TurboAssembler::LoadAddress(Register destination,
mov(destination, Immediate(source));
}
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) const {
int bytes = 0;
RegList saved_regs = kCallerSaved - exclusion;
@ -315,7 +315,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
return bytes;
}
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) {
ASM_CODE_COMMENT(this);
// We don't allow a GC in a write barrier slow path so there is no need to
@ -346,7 +346,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
return bytes;
}
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
ASM_CODE_COMMENT(this);
int bytes = 0;
if (fp_mode == SaveFPRegsMode::kSave) {
@ -412,19 +412,19 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
}
void TurboAssembler::MaybeSaveRegisters(RegList registers) {
void MacroAssembler::MaybeSaveRegisters(RegList registers) {
for (Register reg : registers) {
push(reg);
}
}
void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
void MacroAssembler::MaybeRestoreRegisters(RegList registers) {
for (Register reg : base::Reversed(registers)) {
pop(reg);
}
}
void TurboAssembler::CallEphemeronKeyBarrier(Register object,
void MacroAssembler::CallEphemeronKeyBarrier(Register object,
Register slot_address,
SaveFPRegsMode fp_mode) {
ASM_CODE_COMMENT(this);
@ -449,7 +449,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object,
MaybeRestoreRegisters(registers);
}
void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object,
Register slot_address,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
@ -473,7 +473,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
MaybeRestoreRegisters(registers);
}
void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address,
void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
ASM_CODE_COMMENT(this);
@ -547,17 +547,17 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
}
}
void TurboAssembler::Cvtsi2ss(XMMRegister dst, Operand src) {
void MacroAssembler::Cvtsi2ss(XMMRegister dst, Operand src) {
xorps(dst, dst);
cvtsi2ss(dst, src);
}
void TurboAssembler::Cvtsi2sd(XMMRegister dst, Operand src) {
void MacroAssembler::Cvtsi2sd(XMMRegister dst, Operand src) {
xorpd(dst, dst);
cvtsi2sd(dst, src);
}
void TurboAssembler::Cvtui2ss(XMMRegister dst, Operand src, Register tmp) {
void MacroAssembler::Cvtui2ss(XMMRegister dst, Operand src, Register tmp) {
Label done;
Register src_reg = src.is_reg_only() ? src.reg() : tmp;
if (src_reg == tmp) mov(tmp, src);
@ -578,7 +578,7 @@ void TurboAssembler::Cvtui2ss(XMMRegister dst, Operand src, Register tmp) {
bind(&done);
}
void TurboAssembler::Cvttss2ui(Register dst, Operand src, XMMRegister tmp) {
void MacroAssembler::Cvttss2ui(Register dst, Operand src, XMMRegister tmp) {
Label done;
cvttss2si(dst, src);
test(dst, dst);
@ -590,7 +590,7 @@ void TurboAssembler::Cvttss2ui(Register dst, Operand src, XMMRegister tmp) {
bind(&done);
}
void TurboAssembler::Cvtui2sd(XMMRegister dst, Operand src, Register scratch) {
void MacroAssembler::Cvtui2sd(XMMRegister dst, Operand src, Register scratch) {
Label done;
cmp(src, Immediate(0));
ExternalReference uint32_bias = ExternalReference::address_of_uint32_bias();
@ -600,14 +600,14 @@ void TurboAssembler::Cvtui2sd(XMMRegister dst, Operand src, Register scratch) {
bind(&done);
}
void TurboAssembler::Cvttsd2ui(Register dst, Operand src, XMMRegister tmp) {
void MacroAssembler::Cvttsd2ui(Register dst, Operand src, XMMRegister tmp) {
Move(tmp, -2147483648.0);
addsd(tmp, src);
cvttsd2si(dst, tmp);
add(dst, Immediate(0x80000000));
}
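Editor's note: the four-instruction body above works around ia32's lack of a truncating double->uint32 conversion. A standalone model of the arithmetic (plain C++, no xmm registers; same constants as the assembly):

#include <cstdint>
#include <cstdio>

// Bias by -2^31 so the value fits the signed conversion, truncate, then add
// 2^31 back in integer space -- the Move/addsd/cvttsd2si/add sequence above.
uint32_t Cvttsd2uiModel(double src) {
  double biased = src + -2147483648.0;               // src - 2^31
  int32_t as_signed = static_cast<int32_t>(biased);  // cvttsd2si
  return static_cast<uint32_t>(as_signed) + 0x80000000u;
}

int main() {
  std::printf("%u\n", Cvttsd2uiModel(3000000000.0));  // prints 3000000000
}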
void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) {
void MacroAssembler::ShlPair(Register high, Register low, uint8_t shift) {
DCHECK_GE(63, shift);
if (shift >= 32) {
mov(high, low);
@ -619,7 +619,7 @@ void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) {
}
}
void TurboAssembler::ShlPair_cl(Register high, Register low) {
void MacroAssembler::ShlPair_cl(Register high, Register low) {
ASM_CODE_COMMENT(this);
shld_cl(high, low);
shl_cl(low);
@ -631,7 +631,7 @@ void TurboAssembler::ShlPair_cl(Register high, Register low) {
bind(&done);
}
void TurboAssembler::ShrPair(Register high, Register low, uint8_t shift) {
void MacroAssembler::ShrPair(Register high, Register low, uint8_t shift) {
DCHECK_GE(63, shift);
if (shift >= 32) {
mov(low, high);
@ -643,7 +643,7 @@ void TurboAssembler::ShrPair(Register high, Register low, uint8_t shift) {
}
}
void TurboAssembler::ShrPair_cl(Register high, Register low) {
void MacroAssembler::ShrPair_cl(Register high, Register low) {
ASM_CODE_COMMENT(this);
shrd_cl(low, high);
shr_cl(high);
@ -655,7 +655,7 @@ void TurboAssembler::ShrPair_cl(Register high, Register low) {
bind(&done);
}
void TurboAssembler::SarPair(Register high, Register low, uint8_t shift) {
void MacroAssembler::SarPair(Register high, Register low, uint8_t shift) {
ASM_CODE_COMMENT(this);
DCHECK_GE(63, shift);
if (shift >= 32) {
@ -668,7 +668,7 @@ void TurboAssembler::SarPair(Register high, Register low, uint8_t shift) {
}
}
void TurboAssembler::SarPair_cl(Register high, Register low) {
void MacroAssembler::SarPair_cl(Register high, Register low) {
ASM_CODE_COMMENT(this);
shrd_cl(low, high);
sar_cl(high);
@ -680,7 +680,7 @@ void TurboAssembler::SarPair_cl(Register high, Register low) {
bind(&done);
}
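Editor's note: the *Pair helpers implement 64-bit shifts on a {high, low} pair of 32-bit registers, with a special case for shifts of 32 or more. An equivalent scalar model (illustration only):

#include <cstdint>

// Shift a 64-bit value held as {high, low}. The assembly special-cases
// shift >= 32 as a whole-word move; the widened arithmetic subsumes it.
void ShlPairModel(uint32_t& high, uint32_t& low, unsigned shift) {
  uint64_t v = (static_cast<uint64_t>(high) << 32) | low;
  v <<= (shift & 63);
  high = static_cast<uint32_t>(v >> 32);
  low = static_cast<uint32_t>(v);
}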
void TurboAssembler::LoadMap(Register destination, Register object) {
void MacroAssembler::LoadMap(Register destination, Register object) {
mov(destination, FieldOperand(object, HeapObject::kMapOffset));
}
@ -979,23 +979,23 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
void TurboAssembler::Assert(Condition cc, AbortReason reason) {
void MacroAssembler::Assert(Condition cc, AbortReason reason) {
if (v8_flags.debug_code) Check(cc, reason);
}
void TurboAssembler::AssertUnreachable(AbortReason reason) {
void MacroAssembler::AssertUnreachable(AbortReason reason) {
if (v8_flags.debug_code) Abort(reason);
}
#endif // V8_ENABLE_DEBUG_CODE
void TurboAssembler::StubPrologue(StackFrame::Type type) {
void MacroAssembler::StubPrologue(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
push(ebp); // Caller's frame pointer.
mov(ebp, esp);
push(Immediate(StackFrame::TypeToMarker(type)));
}
void TurboAssembler::Prologue() {
void MacroAssembler::Prologue() {
ASM_CODE_COMMENT(this);
push(ebp); // Caller's frame pointer.
mov(ebp, esp);
@@ -1004,7 +1004,7 @@ void TurboAssembler::Prologue() {
push(kJavaScriptCallArgCountRegister); // Actual argument count.
}
void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
void MacroAssembler::DropArguments(Register count, ArgumentsCountType type,
ArgumentsCountMode mode) {
int receiver_bytes =
(mode == kCountExcludesReceiver) ? kSystemPointerSize : 0;
@@ -1034,7 +1034,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
}
}
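For the integer-count case, the byte arithmetic behind DropArguments is simple enough to state directly; a sketch assuming ia32's 4-byte stack slots (function name invented for illustration):

int BytesToDrop(int count, bool count_excludes_receiver) {
  const int kSystemPointerSize = 4;  // ia32
  // The receiver slot is only added when the count excludes it.
  int receiver_bytes = count_excludes_receiver ? kSystemPointerSize : 0;
  return count * kSystemPointerSize + receiver_bytes;
}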
void TurboAssembler::DropArguments(Register count, Register scratch,
void MacroAssembler::DropArguments(Register count, Register scratch,
ArgumentsCountType type,
ArgumentsCountMode mode) {
DCHECK(!AreAliased(count, scratch));
@@ -1043,7 +1043,7 @@ void TurboAssembler::DropArguments(Register count, Register scratch,
PushReturnAddressFrom(scratch);
}
void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc,
Register receiver,
Register scratch,
ArgumentsCountType type,
@@ -1055,7 +1055,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
PushReturnAddressFrom(scratch);
}
void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc,
Operand receiver,
Register scratch,
ArgumentsCountType type,
@@ -1068,7 +1068,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
PushReturnAddressFrom(scratch);
}
void TurboAssembler::EnterFrame(StackFrame::Type type) {
void MacroAssembler::EnterFrame(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
push(ebp);
mov(ebp, esp);
@@ -1080,7 +1080,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
#endif // V8_ENABLE_WEBASSEMBLY
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
if (v8_flags.debug_code && !StackFrame::IsJavaScript(type)) {
cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
@@ -1091,7 +1091,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
}
#ifdef V8_OS_WIN
void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
void MacroAssembler::AllocateStackSpace(Register bytes_scratch) {
ASM_CODE_COMMENT(this);
// On Windows, we cannot increment the stack size by more than one page
// (minimum page size is 4KB) without accessing at least one byte on the
@@ -1113,7 +1113,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
sub(esp, bytes_scratch);
}
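The page-touching scheme generalizes to any guard-page stack. A conceptual C++ model, assuming the 4KB page size mentioned in the comment (the function and the by-reference stack pointer are illustrative, not V8 API):

void AllocateStackSpaceModel(volatile char*& sp, int bytes) {
  const int kStackPageSize = 4096;
  while (bytes >= kStackPageSize) {
    sp -= kStackPageSize;
    *sp = 0;  // touch one byte so the OS commits the guard page
    bytes -= kStackPageSize;
  }
  sp -= bytes;  // the remainder fits within the current page
}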
void TurboAssembler::AllocateStackSpace(int bytes) {
void MacroAssembler::AllocateStackSpace(int bytes) {
ASM_CODE_COMMENT(this);
DCHECK_GE(bytes, 0);
while (bytes >= kStackPageSize) {
@@ -1332,10 +1332,10 @@ void MacroAssembler::CompareStackLimit(Register with, StackLimitKind kind) {
kind == StackLimitKind::kRealStackLimit
? ExternalReference::address_of_real_jslimit(isolate)
: ExternalReference::address_of_jslimit(isolate);
DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
cmp(with, Operand(kRootRegister, offset));
}
@@ -1565,9 +1565,9 @@ void MacroAssembler::LoadNativeContextSlot(Register destination, int index) {
mov(destination, Operand(destination, Context::SlotOffset(index)));
}
void TurboAssembler::Ret() { ret(0); }
void MacroAssembler::Ret() { ret(0); }
void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
if (is_uint16(bytes_dropped)) {
ret(bytes_dropped);
} else {
@@ -1578,7 +1578,7 @@ void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
}
}
void TurboAssembler::Push(Immediate value) {
void MacroAssembler::Push(Immediate value) {
if (root_array_available() && options().isolate_independent_code) {
if (value.is_embedded_object()) {
Push(HeapObjectAsOperand(value.embedded_object()));
@@ -1597,13 +1597,13 @@ void MacroAssembler::Drop(int stack_elements) {
}
}
void TurboAssembler::Move(Register dst, Register src) {
void MacroAssembler::Move(Register dst, Register src) {
if (dst != src) {
mov(dst, src);
}
}
void TurboAssembler::Move(Register dst, const Immediate& src) {
void MacroAssembler::Move(Register dst, const Immediate& src) {
if (!src.is_heap_number_request() && src.is_zero()) {
xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
} else if (src.is_external_reference()) {
@@ -1613,7 +1613,7 @@ void TurboAssembler::Move(Register dst, const Immediate& src) {
}
}
void TurboAssembler::Move(Operand dst, const Immediate& src) {
void MacroAssembler::Move(Operand dst, const Immediate& src) {
// Since there's no scratch register available, take a detour through the
// stack.
if (root_array_available() && options().isolate_independent_code) {
@@ -1632,9 +1632,9 @@ void TurboAssembler::Move(Operand dst, const Immediate& src) {
}
}
void TurboAssembler::Move(Register dst, Operand src) { mov(dst, src); }
void MacroAssembler::Move(Register dst, Operand src) { mov(dst, src); }
void TurboAssembler::Move(Register dst, Handle<HeapObject> src) {
void MacroAssembler::Move(Register dst, Handle<HeapObject> src) {
if (root_array_available() && options().isolate_independent_code) {
IndirectLoadConstant(dst, src);
return;
@@ -1642,7 +1642,7 @@ void TurboAssembler::Move(Register dst, Handle<HeapObject> src) {
mov(dst, src);
}
void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
if (src == 0) {
pxor(dst, dst);
} else {
@@ -1666,7 +1666,7 @@ void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
}
}
void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
if (src == 0) {
pxor(dst, dst);
} else {
@@ -1705,7 +1705,7 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
}
}
void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src,
void MacroAssembler::PextrdPreSse41(Register dst, XMMRegister src,
uint8_t imm8) {
if (imm8 == 0) {
Movd(dst, src);
@@ -1721,7 +1721,7 @@ void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src,
add(esp, Immediate(kDoubleSize));
}
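Without pextrd, the extraction round-trips through memory, as the stack detour above shows. A C++ model of reading lane imm8 (0 or 1) out of the spilled low 64 bits (names invented for illustration):

#include <cstdint>
#include <cstring>

uint32_t ExtractLaneModel(const void* spilled_low64, uint8_t imm8) {
  uint64_t bits;
  std::memcpy(&bits, spilled_low64, sizeof(bits));  // the stack slot
  return static_cast<uint32_t>(bits >> (imm8 * 32));
}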
void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
void MacroAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
uint32_t* load_pc_offset) {
// Without AVX or SSE, we can only have 64-bit values in xmm registers.
// We don't have an xmm scratch register, so move the data via the stack. This
@@ -1742,7 +1742,7 @@ void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
add(esp, Immediate(kDoubleSize));
}
void TurboAssembler::Lzcnt(Register dst, Operand src) {
void MacroAssembler::Lzcnt(Register dst, Operand src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
lzcnt(dst, src);
@@ -1756,7 +1756,7 @@ void TurboAssembler::Lzcnt(Register dst, Operand src) {
xor_(dst, Immediate(31)); // for x in [0..31], 31^x == 31-x.
}
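The fallback path relies on the identity noted in the comment: for x in [0..31], 31 ^ x == 31 - x, so a single xor turns a most-significant-bit index into a leading-zero count. A checkable C++ model, using the GCC/Clang __builtin_clz as a stand-in for bsr:

#include <cstdint>

uint32_t LzcntFallbackModel(uint32_t src) {
  if (src == 0) return 32;                 // bsr's output is undefined here
  uint32_t msb = 31 - __builtin_clz(src);  // index of the highest set bit
  return msb ^ 31;                         // == 31 - msb, the leading zeros
}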
void TurboAssembler::Tzcnt(Register dst, Operand src) {
void MacroAssembler::Tzcnt(Register dst, Operand src) {
if (CpuFeatures::IsSupported(BMI1)) {
CpuFeatureScope scope(this, BMI1);
tzcnt(dst, src);
@@ -1769,7 +1769,7 @@ void TurboAssembler::Tzcnt(Register dst, Operand src) {
bind(&not_zero_src);
}
void TurboAssembler::Popcnt(Register dst, Operand src) {
void MacroAssembler::Popcnt(Register dst, Operand src) {
if (CpuFeatures::IsSupported(POPCNT)) {
CpuFeatureScope scope(this, POPCNT);
popcnt(dst, src);
@@ -1816,7 +1816,7 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
}
}
void TurboAssembler::Check(Condition cc, AbortReason reason) {
void MacroAssembler::Check(Condition cc, AbortReason reason) {
Label L;
j(cc, &L);
Abort(reason);
@@ -1824,7 +1824,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason) {
bind(&L);
}
void TurboAssembler::CheckStackAlignment() {
void MacroAssembler::CheckStackAlignment() {
ASM_CODE_COMMENT(this);
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
@@ -1839,7 +1839,7 @@ void TurboAssembler::CheckStackAlignment() {
}
}
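The check works because the ABI frame alignment is a power of two, so alignment reduces to a mask test; a one-function sketch (name invented):

#include <cstdint>

bool IsStackAligned(uintptr_t esp, uintptr_t frame_alignment) {
  // frame_alignment is assumed to be a power of two; an aligned esp then
  // has all of the low mask bits clear.
  return (esp & (frame_alignment - 1)) == 0;
}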
void TurboAssembler::Abort(AbortReason reason) {
void MacroAssembler::Abort(AbortReason reason) {
if (v8_flags.code_comments) {
const char* msg = GetAbortReason(reason);
RecordComment("Abort message: ");
@@ -1882,7 +1882,7 @@ void TurboAssembler::Abort(AbortReason reason) {
int3();
}
void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
ASM_CODE_COMMENT(this);
int frame_alignment = base::OS::ActivationFrameAlignment();
if (frame_alignment != 0) {
@@ -1898,14 +1898,14 @@ void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
}
}
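The hunk above is truncated, so the following is only a sketch of the usual reserve-then-align pattern such a prologue performs, not a line-for-line transcription; it assumes 4-byte argument slots and a power-of-two alignment:

#include <cstdint>

uintptr_t PrepareCallCFunctionModel(uintptr_t esp, int num_arguments,
                                    uintptr_t frame_alignment) {
  esp -= static_cast<uintptr_t>(num_arguments) * 4;  // argument slots
  return esp & ~(frame_alignment - 1);  // round down to the ABI alignment
}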
void TurboAssembler::CallCFunction(ExternalReference function,
void MacroAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
// Trashing eax is ok as it will be the return value.
Move(eax, Immediate(function));
CallCFunction(eax, num_arguments);
}
void TurboAssembler::CallCFunction(Register function, int num_arguments) {
void MacroAssembler::CallCFunction(Register function, int num_arguments) {
ASM_CODE_COMMENT(this);
DCHECK_LE(num_arguments, kMaxCParameters);
DCHECK(has_frame());
@@ -1956,7 +1956,7 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
}
}
void TurboAssembler::PushPC() {
void MacroAssembler::PushPC() {
// Push the current PC onto the stack as "return address" via calling
// the next instruction.
Label get_pc;
@@ -1964,7 +1964,7 @@ void TurboAssembler::PushPC() {
bind(&get_pc);
}
void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
ASM_CODE_COMMENT(this);
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
@@ -1977,7 +1977,7 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
call(code_object, rmode);
}
void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
ASM_CODE_COMMENT(this);
static_assert(kSystemPointerSize == 4);
static_assert(kSmiShiftSize == 0);
@@ -1993,13 +1993,13 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
IsolateData::builtin_entry_table_offset()));
}
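The static_asserts pin down why a single shift suffices here: the index arrives as a Smi (its payload shifted left by one tag bit) and the entry table holds 4-byte pointers. A model of the offset computation, assuming exactly the tag layout asserted above (function name invented):

#include <cstdint>

int32_t BuiltinEntryOffsetModel(int32_t tagged_smi_index) {
  int32_t value = tagged_smi_index >> 1;  // untag (kSmiTagSize == 1)
  return value * 4;                       // scale by kSystemPointerSize
  // Net effect: tagged_smi_index << 1, which the assembly exploits.
}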
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
void MacroAssembler::CallBuiltinByIndex(Register builtin_index) {
ASM_CODE_COMMENT(this);
LoadEntryFromBuiltinIndex(builtin_index);
call(builtin_index);
}
void TurboAssembler::CallBuiltin(Builtin builtin) {
void MacroAssembler::CallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
switch (options().builtin_call_jump_mode) {
case BuiltinCallJumpMode::kAbsolute: {
@@ -2019,7 +2019,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
}
}
void TurboAssembler::TailCallBuiltin(Builtin builtin) {
void MacroAssembler::TailCallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(this,
CommentForOffHeapTrampoline("tail call", builtin));
switch (options().builtin_call_jump_mode) {
@@ -2040,17 +2040,17 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
}
}
Operand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
Operand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
ASM_CODE_COMMENT(this);
return Operand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin));
}
void TurboAssembler::LoadCodeEntry(Register destination, Register code_object) {
void MacroAssembler::LoadCodeEntry(Register destination, Register code_object) {
ASM_CODE_COMMENT(this);
mov(destination, FieldOperand(code_object, Code::kCodeEntryPointOffset));
}
void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination,
void MacroAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination,
Register code_object) {
ASM_CODE_COMMENT(this);
// Compute the InstructionStream object pointer from the code entry point.
@@ -2058,12 +2058,12 @@ void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination,
sub(destination, Immediate(InstructionStream::kHeaderSize - kHeapObjectTag));
}
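The subtraction above is ordinary tagged-pointer arithmetic. A sketch with kHeapObjectTag = 1 and a placeholder header size (the real constant lives in InstructionStream; the function name is invented):

#include <cstdint>

intptr_t TaggedObjectFromEntryModel(intptr_t code_entry_point) {
  const intptr_t kHeapObjectTag = 1;
  const intptr_t kHeaderSize = 64;  // placeholder, not the real value
  // entry == object_start + kHeaderSize; tagged == object_start + tag.
  return code_entry_point - (kHeaderSize - kHeapObjectTag);
}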
void TurboAssembler::CallCodeObject(Register code_object) {
void MacroAssembler::CallCodeObject(Register code_object) {
LoadCodeEntry(code_object, code_object);
call(code_object);
}
void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
LoadCodeEntry(code_object, code_object);
switch (jump_mode) {
case JumpMode::kJump:
@@ -2076,13 +2076,13 @@ void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
}
}
void TurboAssembler::Jump(const ExternalReference& reference) {
void MacroAssembler::Jump(const ExternalReference& reference) {
DCHECK(root_array_available());
jmp(Operand(kRootRegister, RootRegisterOffsetForExternalReferenceTableEntry(
isolate(), reference)));
}
void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
Builtin builtin = Builtin::kNoBuiltinId;
@@ -2094,7 +2094,7 @@ void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
jmp(code_object, rmode);
}
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met,
Label::Distance condition_met_distance) {
ASM_CODE_COMMENT(this);
@@ -2113,7 +2113,7 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
j(cc, condition_met, condition_met_distance);
}
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
void MacroAssembler::ComputeCodeStartAddress(Register dst) {
ASM_CODE_COMMENT(this);
// In order to get the address of the current instruction, we first need
// to use a call and then use a pop, thus pushing the return address to
@@ -2128,7 +2128,7 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
}
}
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
ASM_CODE_COMMENT(this);
@@ -2138,8 +2138,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
: Deoptimizer::kEagerDeoptExitSize);
}
void TurboAssembler::Trap() { int3(); }
void TurboAssembler::DebugBreak() { int3(); }
void MacroAssembler::Trap() { int3(); }
void MacroAssembler::DebugBreak() { int3(); }
} // namespace internal
} // namespace v8


@@ -21,10 +21,10 @@
#include "src/codegen/ia32/assembler-ia32.h"
#include "src/codegen/ia32/register-ia32.h"
#include "src/codegen/label.h"
#include "src/codegen/macro-assembler-base.h"
#include "src/codegen/reglist.h"
#include "src/codegen/reloc-info.h"
#include "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h"
#include "src/codegen/turbo-assembler.h"
#include "src/common/globals.h"
#include "src/execution/frames.h"
#include "src/handles/handles.h"
@@ -68,10 +68,10 @@ class StackArgumentsAccessor {
DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
};
class V8_EXPORT_PRIVATE TurboAssembler
: public SharedTurboAssemblerBase<TurboAssembler> {
class V8_EXPORT_PRIVATE MacroAssembler
: public SharedMacroAssembler<MacroAssembler> {
public:
using SharedTurboAssemblerBase<TurboAssembler>::SharedTurboAssemblerBase;
using SharedMacroAssembler<MacroAssembler>::SharedMacroAssembler;
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met,
@@ -411,17 +411,6 @@ class V8_EXPORT_PRIVATE TurboAssembler
// Define an exception handler and bind a label.
void BindExceptionHandler(Label* label) { bind(label); }
protected:
// Drops arguments assuming that the return address was already popped.
void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger,
ArgumentsCountMode mode = kCountExcludesReceiver);
};
// MacroAssembler implements a collection of frequently used macros.
class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
using TurboAssembler::TurboAssembler;
void PushRoot(RootIndex index);
// Compare the object in a register to a value and jump if they are equal.
@@ -671,6 +660,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void StackOverflowCheck(Register num_args, Register scratch,
Label* stack_overflow, bool include_receiver = false);
protected:
// Drops arguments assuming that the return address was already popped.
void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger,
ArgumentsCountMode mode = kCountExcludesReceiver);
private:
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,

File diff suppressed because it is too large


@@ -59,9 +59,9 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
using MacroAssemblerBase::MacroAssemblerBase;
// Activation support.
void EnterFrame(StackFrame::Type type);
@@ -773,46 +773,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Define an exception handler and bind a label.
void BindExceptionHandler(Label* label) { bind(label); }
protected:
inline Register GetRkAsRegisterHelper(const Operand& rk, Register scratch);
inline int32_t GetOffset(Label* L, OffsetSize bits);
private:
bool has_double_zero_reg_set_ = false;
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
// succeeds, otherwise falls through if result is saturated. On return
// 'result' either holds answer, or is clobbered on fall through.
void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
Label* done);
bool BranchShortOrFallback(Label* L, Condition cond, Register rj,
const Operand& rk, bool need_link);
// f32 or f64
void CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
CFRegister cd, bool f32 = true);
void CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd,
bool f32 = true);
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode);
void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode);
// Push a fixed frame, consisting of ra, fp.
void PushCommonFrame(Register marker_reg = no_reg);
};
// MacroAssembler implements a collection of frequently used macros.
class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
using TurboAssembler::TurboAssembler;
// It assumes that the arguments are located below the stack pointer.
// argc is the number of arguments not including the receiver.
// TODO(LOONG_dev): LOONG64: Remove this function once we stick with the
@@ -1079,17 +1039,50 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DecodeField<Field>(reg, reg);
}
protected:
inline Register GetRkAsRegisterHelper(const Operand& rk, Register scratch);
inline int32_t GetOffset(Label* L, OffsetSize bits);
private:
bool has_double_zero_reg_set_ = false;
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
InvokeType type);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
// succeeds, otherwise falls through if result is saturated. On return
// 'result' either holds answer, or is clobbered on fall through.
void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
Label* done);
bool BranchShortOrFallback(Label* L, Condition cond, Register rj,
const Operand& rk, bool need_link);
// f32 or f64
void CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
CFRegister cd, bool f32 = true);
void CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd,
bool f32 = true);
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode);
void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode);
// Push a fixed frame, consisting of ra, fp.
void PushCommonFrame(Register marker_reg = no_reg);
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
template <typename Func>
void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
Func GetLabelFunction) {
UseScratchRegisterScope scope(this);
Register scratch = scope.Acquire();


@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/turbo-assembler.h"
#include "src/codegen/macro-assembler-base.h"
#include "src/builtins/builtins.h"
#include "src/builtins/constants-table-builder.h"
@@ -15,7 +15,7 @@
namespace v8 {
namespace internal {
TurboAssemblerBase::TurboAssemblerBase(Isolate* isolate,
MacroAssemblerBase::MacroAssemblerBase(Isolate* isolate,
const AssemblerOptions& options,
CodeObjectRequired create_code_object,
std::unique_ptr<AssemblerBuffer> buffer)
@@ -26,7 +26,7 @@ TurboAssemblerBase::TurboAssemblerBase(Isolate* isolate,
}
}
Address TurboAssemblerBase::BuiltinEntry(Builtin builtin) {
Address MacroAssemblerBase::BuiltinEntry(Builtin builtin) {
DCHECK(Builtins::IsBuiltinId(builtin));
if (isolate_ != nullptr) {
Address entry = isolate_->builtin_entry_table()[Builtins::ToInt(builtin)];
@@ -38,7 +38,7 @@ Address TurboAssemblerBase::BuiltinEntry(Builtin builtin) {
return d.InstructionStartOfBuiltin(builtin);
}
void TurboAssemblerBase::IndirectLoadConstant(Register destination,
void MacroAssemblerBase::IndirectLoadConstant(Register destination,
Handle<HeapObject> object) {
CHECK(root_array_available_);
@@ -71,7 +71,7 @@ void TurboAssemblerBase::IndirectLoadConstant(Register destination,
}
}
void TurboAssemblerBase::IndirectLoadExternalReference(
void MacroAssemblerBase::IndirectLoadExternalReference(
Register destination, ExternalReference reference) {
CHECK(root_array_available_);
@@ -90,24 +90,24 @@ void TurboAssemblerBase::IndirectLoadExternalReference(
}
// static
int32_t TurboAssemblerBase::RootRegisterOffsetForRootIndex(
int32_t MacroAssemblerBase::RootRegisterOffsetForRootIndex(
RootIndex root_index) {
return IsolateData::root_slot_offset(root_index);
}
// static
int32_t TurboAssemblerBase::RootRegisterOffsetForBuiltin(Builtin builtin) {
int32_t MacroAssemblerBase::RootRegisterOffsetForBuiltin(Builtin builtin) {
return IsolateData::BuiltinSlotOffset(builtin);
}
// static
intptr_t TurboAssemblerBase::RootRegisterOffsetForExternalReference(
intptr_t MacroAssemblerBase::RootRegisterOffsetForExternalReference(
Isolate* isolate, const ExternalReference& reference) {
return static_cast<intptr_t>(reference.address() - isolate->isolate_root());
}
// static
int32_t TurboAssemblerBase::RootRegisterOffsetForExternalReferenceTableEntry(
int32_t MacroAssemblerBase::RootRegisterOffsetForExternalReferenceTableEntry(
Isolate* isolate, const ExternalReference& reference) {
// Encode as an index into the external reference table stored on the
// isolate.
@@ -120,13 +120,13 @@ int32_t TurboAssemblerBase::RootRegisterOffsetForExternalReferenceTableEntry(
}
// static
bool TurboAssemblerBase::IsAddressableThroughRootRegister(
bool MacroAssemblerBase::IsAddressableThroughRootRegister(
Isolate* isolate, const ExternalReference& reference) {
Address address = reference.address();
return isolate->root_register_addressable_region().contains(address);
}
Tagged_t TurboAssemblerBase::ReadOnlyRootPtr(RootIndex index) {
Tagged_t MacroAssemblerBase::ReadOnlyRootPtr(RootIndex index) {
DCHECK(RootsTable::IsReadOnly(index));
CHECK(V8_STATIC_ROOTS_BOOL);
CHECK(isolate_->root(index).IsHeapObject());


@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_TURBO_ASSEMBLER_H_
#define V8_CODEGEN_TURBO_ASSEMBLER_H_
#ifndef V8_CODEGEN_MACRO_ASSEMBLER_BASE_H_
#define V8_CODEGEN_MACRO_ASSEMBLER_BASE_H_
#include <memory>
@@ -15,30 +15,24 @@
namespace v8 {
namespace internal {
// Common base class for platform-specific TurboAssemblers containing
// Common base class for platform-specific MacroAssemblers containing
// platform-independent bits.
// You will encounter two subclasses, TurboAssembler (derives from
// TurboAssemblerBase), and MacroAssembler (derives from TurboAssembler). The
// main difference is that MacroAssembler is allowed to access the isolate, and
// TurboAssembler accesses the isolate in a very limited way. TurboAssembler
// contains all the functionality that is used by Turbofan, and does not expect
// to be running on the main thread.
class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
// TODO(victorgomes): We should use LocalIsolate instead of Isolate in the
// methods of this class.
class V8_EXPORT_PRIVATE MacroAssemblerBase : public Assembler {
public:
// Constructors are declared public to inherit them in derived classes
// with `using` directive.
TurboAssemblerBase(Isolate* isolate, CodeObjectRequired create_code_object,
MacroAssemblerBase(Isolate* isolate, CodeObjectRequired create_code_object,
std::unique_ptr<AssemblerBuffer> buffer = {})
: TurboAssemblerBase(isolate, AssemblerOptions::Default(isolate),
: MacroAssemblerBase(isolate, AssemblerOptions::Default(isolate),
create_code_object, std::move(buffer)) {}
TurboAssemblerBase(Isolate* isolate, const AssemblerOptions& options,
MacroAssemblerBase(Isolate* isolate, const AssemblerOptions& options,
CodeObjectRequired create_code_object,
std::unique_ptr<AssemblerBuffer> buffer = {});
Isolate* isolate() const {
return isolate_;
}
Isolate* isolate() const { return isolate_; }
Handle<HeapObject> CodeObject() const {
DCHECK(!code_object_.is_null());
@@ -135,25 +129,25 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
int comment_depth_ = 0;
DISALLOW_IMPLICIT_CONSTRUCTORS(TurboAssemblerBase);
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssemblerBase);
};
// Avoids emitting calls to the {Builtin::kAbort} builtin when emitting
// debug code during the lifetime of this scope object.
class V8_NODISCARD HardAbortScope {
public:
explicit HardAbortScope(TurboAssemblerBase* assembler)
explicit HardAbortScope(MacroAssemblerBase* assembler)
: assembler_(assembler), old_value_(assembler->should_abort_hard()) {
assembler_->set_abort_hard(true);
}
~HardAbortScope() { assembler_->set_abort_hard(old_value_); }
private:
TurboAssemblerBase* assembler_;
MacroAssemblerBase* assembler_;
bool old_value_;
};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_TURBO_ASSEMBLER_H_
#endif // V8_CODEGEN_MACRO_ASSEMBLER_BASE_H_


@@ -5,7 +5,7 @@
#ifndef V8_CODEGEN_MACRO_ASSEMBLER_H_
#define V8_CODEGEN_MACRO_ASSEMBLER_H_
#include "src/codegen/turbo-assembler.h"
#include "src/codegen/macro-assembler-base.h"
#include "src/execution/frames.h"
#include "src/heap/heap.h"
@@ -82,25 +82,25 @@ static constexpr int kMaxCParameters = 256;
class V8_NODISCARD FrameScope {
public:
explicit FrameScope(TurboAssembler* tasm, StackFrame::Type type)
explicit FrameScope(MacroAssembler* masm, StackFrame::Type type)
:
#ifdef V8_CODE_COMMENTS
comment_(tasm, frame_name(type)),
comment_(masm, frame_name(type)),
#endif
tasm_(tasm),
masm_(masm),
type_(type),
old_has_frame_(tasm->has_frame()) {
tasm->set_has_frame(true);
old_has_frame_(masm->has_frame()) {
masm->set_has_frame(true);
if (type != StackFrame::MANUAL && type_ != StackFrame::NO_FRAME_TYPE) {
tasm->EnterFrame(type);
masm->EnterFrame(type);
}
}
~FrameScope() {
if (type_ != StackFrame::MANUAL && type_ != StackFrame::NO_FRAME_TYPE) {
tasm_->LeaveFrame(type_);
masm_->LeaveFrame(type_);
}
tasm_->set_has_frame(old_has_frame_);
masm_->set_has_frame(old_has_frame_);
}
private:
@@ -125,7 +125,7 @@ class V8_NODISCARD FrameScope {
Assembler::CodeComment comment_;
#endif // V8_CODE_COMMENTS
TurboAssembler* tasm_;
MacroAssembler* masm_;
StackFrame::Type const type_;
bool const old_has_frame_;
};
@@ -198,7 +198,7 @@ class V8_NODISCARD AllowExternalCallThatCantCauseGC : public FrameScope {
// scope object.
class V8_NODISCARD NoRootArrayScope {
public:
explicit NoRootArrayScope(TurboAssembler* masm)
explicit NoRootArrayScope(MacroAssembler* masm)
: masm_(masm), old_value_(masm->root_array_available()) {
masm->set_root_array_available(false);
}
@@ -206,7 +206,7 @@ class V8_NODISCARD NoRootArrayScope {
~NoRootArrayScope() { masm_->set_root_array_available(old_value_); }
private:
TurboAssembler* masm_;
MacroAssembler* masm_;
bool old_value_;
};


@@ -819,7 +819,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
Instr instr_b = REGIMM | BGEZAL; // Branch and link.
instr_b = SetBranchOffset(pos, target_pos, instr_b);
// Correct ra register to point to one instruction after jalr from
// TurboAssembler::BranchAndLinkLong.
// MacroAssembler::BranchAndLinkLong.
Instr instr_a = DADDIU | ra.code() << kRsShift | ra.code() << kRtShift |
kOptimizedBranchAndLinkLongReturnOffset;


@@ -294,7 +294,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Adjust ra register in branch delay slot of bal instruction so to skip
// instructions not needed after optimization of PIC in
// TurboAssembler::BranchAndLink method.
// MacroAssembler::BranchAndLink method.
static constexpr int kOptimizedBranchAndLinkLongReturnOffset = 4 * kInstrSize;

File diff suppressed because it is too large


@@ -90,9 +90,9 @@ inline MemOperand CFunctionArgumentOperand(int index) {
return MemOperand(sp, offset);
}
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
using MacroAssemblerBase::MacroAssemblerBase;
// Activation support.
void EnterFrame(StackFrame::Type type);
@@ -913,79 +913,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Define an exception handler and bind a label.
void BindExceptionHandler(Label* label) { bind(label); }
protected:
inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
private:
bool has_double_zero_reg_set_ = false;
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
// succeeds, otherwise falls through if result is saturated. On return
// 'result' either holds answer, or is clobbered on fall through.
void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
Label* done);
void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1,
FPURegister cmp2);
void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
FPURegister cmp2);
void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
MSARegister wt, BranchDelaySlot bd = PROTECT);
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
// TODO(mips) Reorder parameters so out parameters come last.
bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
Register* scratch, const Operand& rt);
void BranchShortHelperR6(int32_t offset, Label* L);
void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt);
bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot);
bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot);
void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
void BranchAndLinkShortHelper(int16_t offset, Label* L,
BranchDelaySlot bdslot);
void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt);
bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
Register rs, const Operand& rt,
BranchDelaySlot bdslot);
bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt,
BranchDelaySlot bdslot);
void BranchLong(Label* L, BranchDelaySlot bdslot);
void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
template <typename RoundFunc>
void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode,
RoundFunc round);
template <typename RoundFunc>
void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode,
RoundFunc round);
// Push a fixed frame, consisting of ra, fp.
void PushCommonFrame(Register marker_reg = no_reg);
};
// MacroAssembler implements a collection of frequently used macros.
class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
using TurboAssembler::TurboAssembler;
// It assumes that the arguments are located below the stack pointer.
// argc is the number of arguments not including the receiver.
// TODO(victorgomes): Remove this function once we stick with the reversed
@@ -1087,9 +1014,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
FPURegister scratch);
void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
MSARegister wt, BranchDelaySlot bd = PROTECT);
// Enter exit frame.
// argc - argument count to be dropped by LeaveExitFrame.
// stack_space - extra stack space.
@@ -1269,17 +1193,83 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DecodeField<Field>(reg, reg);
}
protected:
inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
private:
bool has_double_zero_reg_set_ = false;
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
InvokeType type);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
// succeeds, otherwise falls through if result is saturated. On return
// 'result' either holds answer, or is clobbered on fall through.
void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
Label* done);
void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1,
FPURegister cmp2);
void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
FPURegister cmp2);
void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
MSARegister wt, BranchDelaySlot bd = PROTECT);
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
// TODO(mips) Reorder parameters so out parameters come last.
bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
Register* scratch, const Operand& rt);
void BranchShortHelperR6(int32_t offset, Label* L);
void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt);
bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot);
bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot);
void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
void BranchAndLinkShortHelper(int16_t offset, Label* L,
BranchDelaySlot bdslot);
void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt);
bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
Register rs, const Operand& rt,
BranchDelaySlot bdslot);
bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt,
BranchDelaySlot bdslot);
void BranchLong(Label* L, BranchDelaySlot bdslot);
void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
template <typename RoundFunc>
void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode,
RoundFunc round);
template <typename RoundFunc>
void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode,
RoundFunc round);
// Push a fixed frame, consisting of ra, fp.
void PushCommonFrame(Register marker_reg = no_reg);
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
template <typename Func>
void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
Func GetLabelFunction) {
// Ensure that dd-ed labels following this instruction use 8 bytes aligned
// addresses.


@@ -148,7 +148,7 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc,
HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
if (IsCompressedEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny(
return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTagged(
cage_base,
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {


@@ -1570,7 +1570,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
private:
friend class Assembler;
friend class TurboAssembler;
friend class MacroAssembler;
Assembler* assembler_;
RegList old_available_;


@@ -151,7 +151,7 @@ enum Condition {
kNotZero = 16,
};
inline Condition check_condition(Condition cond) {
inline Condition to_condition(Condition cond) {
switch (cond) {
case kUnsignedLessThan:
return lt;
@@ -171,6 +171,31 @@ inline Condition check_condition(Condition cond) {
return cond;
}
inline bool is_signed(Condition cond) {
switch (cond) {
case kEqual:
case kNotEqual:
case kLessThan:
case kGreaterThan:
case kLessThanEqual:
case kGreaterThanEqual:
case kOverflow:
case kNoOverflow:
case kZero:
case kNotZero:
return true;
case kUnsignedLessThan:
case kUnsignedGreaterThan:
case kUnsignedLessThanEqual:
case kUnsignedGreaterThanEqual:
return false;
default:
UNREACHABLE();
}
}
inline Condition NegateCondition(Condition cond) {
DCHECK(cond != al);
return static_cast<Condition>(cond ^ ne);

File diff suppressed because it is too large


@@ -47,9 +47,9 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
#define ClearRightImm clrrwi
#endif
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
using MacroAssemblerBase::MacroAssemblerBase;
void CallBuiltin(Builtin builtin, Condition cond = al);
void TailCallBuiltin(Builtin builtin, Condition cond = al,
@@ -1010,19 +1010,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#endif
}
// Loads a field containing a HeapObject and decompresses it if pointer
// compression is enabled.
void LoadTaggedPointerField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch = no_reg);
// Loads a field containing any tagged value and decompresses it if necessary.
void LoadTaggedField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch = no_reg);
void LoadTaggedSignedField(Register destination, MemOperand field_operand,
Register scratch);
// Loads a field containing any tagged value and decompresses it if necessary.
void LoadAnyTaggedField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch = no_reg);
// Compresses and stores tagged value to given on-heap location.
void StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand,
@@ -1030,11 +1024,9 @@
void DecompressTaggedSigned(Register destination, MemOperand field_operand);
void DecompressTaggedSigned(Register destination, Register src);
void DecompressTaggedPointer(Register destination, MemOperand field_operand);
void DecompressTaggedPointer(Register destination, Register source);
void DecompressTaggedPointer(const Register& destination, Tagged_t immediate);
void DecompressAnyTagged(Register destination, MemOperand field_operand);
void DecompressAnyTagged(Register destination, Register source);
void DecompressTagged(Register destination, MemOperand field_operand);
void DecompressTagged(Register destination, Register source);
void DecompressTagged(const Register& destination, Tagged_t immediate);
void LoadF64(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);
@@ -1438,21 +1430,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void S128Select(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register mask);
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments,
bool has_function_descriptor);
};
// MacroAssembler implements a collection of frequently used macros.
class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
using TurboAssembler::TurboAssembler;
// It assumes that the arguments are located below the stack pointer.
// argc is the number of arguments not including the receiver.
// TODO(victorgomes): Remove this function once we stick with the reversed
@@ -1745,6 +1722,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments,
bool has_function_descriptor);
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,


@@ -162,7 +162,7 @@ void Assembler::deserialization_set_target_internal_reference_at(
HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
if (IsCompressedEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny(
return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTagged(
cage_base,
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {

File diff suppressed because it is too large


@@ -90,9 +90,9 @@ inline MemOperand CFunctionArgumentOperand(int index) {
return MemOperand(sp, offset);
}
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
using MacroAssemblerBase::MacroAssemblerBase;
// Activation support.
void EnterFrame(StackFrame::Type type);
@@ -1072,14 +1072,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// ---------------------------------------------------------------------------
// Pointer compression Support
// Loads a field containing a HeapObject and decompresses it if pointer
// compression is enabled.
void LoadTaggedPointerField(const Register& destination,
const MemOperand& field_operand);
// Loads a field containing any tagged value and decompresses it if necessary.
void LoadAnyTaggedField(const Register& destination,
const MemOperand& field_operand);
void LoadTaggedField(const Register& destination,
const MemOperand& field_operand);
// Loads a field containing a tagged signed value and decompresses it if
// necessary.
@@ -1095,12 +1090,9 @@
void DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand);
void DecompressTaggedPointer(const Register& destination,
const MemOperand& field_operand);
void DecompressTaggedPointer(const Register& destination,
const Register& source);
void DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand);
void DecompressTagged(const Register& destination,
const MemOperand& field_operand);
void DecompressTagged(const Register& destination, const Register& source);
void CmpTagged(const Register& rd, const Register& rs1, const Register& rs2) {
if (COMPRESS_POINTERS_BOOL) {
Sub32(rd, rs1, rs2);
@@ -1113,12 +1105,8 @@
// Pointer compression Support
// rv32 doesn't support pointer compression. Define these functions to
// simplify builtins.
inline void LoadTaggedPointerField(const Register& destination,
const MemOperand& field_operand) {
Lw(destination, field_operand);
}
inline void LoadAnyTaggedField(const Register& destination,
const MemOperand& field_operand) {
inline void LoadTaggedField(const Register& destination,
const MemOperand& field_operand) {
Lw(destination, field_operand);
}
inline void LoadTaggedSignedField(const Register& destination,
@@ -1174,71 +1162,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadLane(int sz, VRegister dst, uint8_t laneidx, MemOperand src);
void StoreLane(int sz, VRegister src, uint8_t laneidx, MemOperand dst);
protected:
inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
private:
bool has_double_zero_reg_set_ = false;
bool has_single_zero_reg_set_ = false;
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
// succeeds, otherwise falls through if result is saturated. On return
// 'result' either holds answer, or is clobbered on fall through.
void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
Label* done);
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
// TODO(RISCV) Reorder parameters so out parameters come last.
bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
Register* scratch, const Operand& rt);
void BranchShortHelper(int32_t offset, Label* L);
bool BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs,
const Operand& rt);
bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
const Operand& rt);
void BranchAndLinkShortHelper(int32_t offset, Label* L);
void BranchAndLinkShort(int32_t offset);
void BranchAndLinkShort(Label* L);
bool BranchAndLinkShortHelper(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt);
bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt);
void BranchAndLinkLong(Label* L);
#if V8_TARGET_ARCH_RISCV64
template <typename F_TYPE>
void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
FPURoundingMode mode);
#elif V8_TARGET_ARCH_RISCV32
void RoundDouble(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
FPURoundingMode mode);
void RoundFloat(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
FPURoundingMode mode);
#endif
template <typename F>
void RoundHelper(VRegister dst, VRegister src, Register scratch,
VRegister v_scratch, FPURoundingMode frm);
template <typename TruncFunc>
void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result,
TruncFunc trunc);
// Push a fixed frame, consisting of ra, fp.
void PushCommonFrame(Register marker_reg = no_reg);
};
// MacroAssembler implements a collection of frequently used macros.
class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
using TurboAssembler::TurboAssembler;
// It assumes that the arguments are located below the stack pointer.
// argc is the number of arguments not including the receiver.
// TODO(victorgomes): Remove this function once we stick with the reversed
@@ -1521,7 +1444,65 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DecodeField<Field>(reg, reg);
}
protected:
inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
private:
bool has_double_zero_reg_set_ = false;
bool has_single_zero_reg_set_ = false;
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
// succeeds, otherwise falls through if result is saturated. On return
// 'result' either holds answer, or is clobbered on fall through.
void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
Label* done);
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
// TODO(RISCV) Reorder parameters so out parameters come last.
bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
Register* scratch, const Operand& rt);
void BranchShortHelper(int32_t offset, Label* L);
bool BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs,
const Operand& rt);
bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
const Operand& rt);
void BranchAndLinkShortHelper(int32_t offset, Label* L);
void BranchAndLinkShort(int32_t offset);
void BranchAndLinkShort(Label* L);
bool BranchAndLinkShortHelper(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt);
bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt);
void BranchAndLinkLong(Label* L);
#if V8_TARGET_ARCH_RISCV64
template <typename F_TYPE>
void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
FPURoundingMode mode);
#elif V8_TARGET_ARCH_RISCV32
void RoundDouble(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
FPURoundingMode mode);
void RoundFloat(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
FPURoundingMode mode);
#endif
template <typename F>
void RoundHelper(VRegister dst, VRegister src, Register scratch,
VRegister v_scratch, FPURoundingMode frm);
template <typename TruncFunc>
void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result,
TruncFunc trunc);
// Push a fixed frame, consisting of ra, fp.
void PushCommonFrame(Register marker_reg = no_reg);
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
@@ -1538,7 +1519,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
};
template <typename Func>
void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
Func GetLabelFunction) {
// Ensure that dd-ed labels following this instruction use 8 bytes aligned
// addresses.


@@ -142,7 +142,7 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
if (IsCompressedEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny(
return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTagged(
cage_base,
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {


@@ -1494,7 +1494,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
private:
friend class Assembler;
friend class TurboAssembler;
friend class MacroAssembler;
Assembler* assembler_;
RegList old_available_;


@@ -123,7 +123,7 @@ enum Condition {
kNotZero = 21,
};
inline Condition check_condition(Condition cond) {
inline Condition to_condition(Condition cond) {
switch (cond) {
case kUnsignedLessThan:
return lt;
@@ -143,6 +143,31 @@ inline Condition check_condition(Condition cond) {
return cond;
}
inline bool is_signed(Condition cond) {
switch (cond) {
case kEqual:
case kNotEqual:
case kLessThan:
case kGreaterThan:
case kLessThanEqual:
case kGreaterThanEqual:
case kOverflow:
case kNoOverflow:
case kZero:
case kNotZero:
return true;
case kUnsignedLessThan:
case kUnsignedGreaterThan:
case kUnsignedLessThanEqual:
case kUnsignedGreaterThanEqual:
return false;
default:
UNREACHABLE();
}
}
inline Condition NegateCondition(Condition cond) {
DCHECK(cond != al);
switch (cond) {

File diff suppressed because it is too large


@@ -41,9 +41,9 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
Register reg5 = no_reg,
Register reg6 = no_reg);
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
using MacroAssemblerBase::MacroAssemblerBase;
void CallBuiltin(Builtin builtin, Condition cond = al);
void TailCallBuiltin(Builtin builtin, Condition cond = al);
@@ -1464,17 +1464,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#endif
}
// Loads a field containing a HeapObject and decompresses it if pointer
// compression is enabled.
void LoadTaggedPointerField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch = no_reg);
void LoadTaggedSignedField(Register destination, MemOperand field_operand);
// Loads a field containing any tagged value and decompresses it if necessary.
void LoadAnyTaggedField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch = no_reg);
void LoadTaggedField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch = no_reg);
void LoadTaggedSignedField(Register destination, MemOperand field_operand);
// Loads a field containing smi value and untags it.
void SmiUntagField(Register dst, const MemOperand& src);
@@ -1486,11 +1480,9 @@
void DecompressTaggedSigned(Register destination, MemOperand field_operand);
void DecompressTaggedSigned(Register destination, Register src);
void DecompressTaggedPointer(Register destination, MemOperand field_operand);
void DecompressTaggedPointer(Register destination, Register source);
void DecompressTaggedPointer(const Register& destination, Tagged_t immediate);
void DecompressAnyTagged(Register destination, MemOperand field_operand);
void DecompressAnyTagged(Register destination, Register source);
void DecompressTagged(Register destination, MemOperand field_operand);
void DecompressTagged(Register destination, Register source);
void DecompressTagged(const Register& destination, Tagged_t immediate);
// CountLeadingZeros will corrupt the scratch register pair (eg. r0:r1)
void CountLeadingZerosU32(Register dst, Register src,
@@ -1502,22 +1494,6 @@
void CountTrailingZerosU64(Register dst, Register src,
Register scratch_pair = r0);
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
};
// MacroAssembler implements a collection of frequently used macros.
class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
using TurboAssembler::TurboAssembler;
void LoadStackLimit(Register destination, StackLimitKind kind);
// It assumes that the arguments are located below the stack pointer.
// argc is the number of arguments not including the receiver.
@ -1803,6 +1779,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,

View File

@ -27,7 +27,7 @@
namespace v8 {
namespace internal {
void SharedTurboAssembler::Move(Register dst, uint32_t src) {
void SharedMacroAssemblerBase::Move(Register dst, uint32_t src) {
// Helper to paper over the different assembler function names.
#if V8_TARGET_ARCH_IA32
mov(dst, Immediate(src));
@ -38,7 +38,7 @@ void SharedTurboAssembler::Move(Register dst, uint32_t src) {
#endif
}
void SharedTurboAssembler::Move(Register dst, Register src) {
void SharedMacroAssemblerBase::Move(Register dst, Register src) {
// Helper to paper over the different assembler function names.
if (dst != src) {
#if V8_TARGET_ARCH_IA32
@ -51,7 +51,7 @@ void SharedTurboAssembler::Move(Register dst, Register src) {
}
}
void SharedTurboAssembler::Add(Register dst, Immediate src) {
void SharedMacroAssemblerBase::Add(Register dst, Immediate src) {
// Helper to paper over the different assembler function names.
#if V8_TARGET_ARCH_IA32
add(dst, src);
@ -62,7 +62,7 @@ void SharedTurboAssembler::Add(Register dst, Immediate src) {
#endif
}
void SharedTurboAssembler::And(Register dst, Immediate src) {
void SharedMacroAssemblerBase::And(Register dst, Immediate src) {
// Helper to paper over the different assembler function names.
#if V8_TARGET_ARCH_IA32
and_(dst, src);
@ -77,8 +77,8 @@ void SharedTurboAssembler::And(Register dst, Immediate src) {
#endif
}
void SharedTurboAssembler::Movhps(XMMRegister dst, XMMRegister src1,
Operand src2) {
void SharedMacroAssemblerBase::Movhps(XMMRegister dst, XMMRegister src1,
Operand src2) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vmovhps(dst, src1, src2);
@ -90,8 +90,8 @@ void SharedTurboAssembler::Movhps(XMMRegister dst, XMMRegister src1,
}
}
void SharedTurboAssembler::Movlps(XMMRegister dst, XMMRegister src1,
Operand src2) {
void SharedMacroAssemblerBase::Movlps(XMMRegister dst, XMMRegister src1,
Operand src2) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vmovlps(dst, src1, src2);
@ -102,8 +102,8 @@ void SharedTurboAssembler::Movlps(XMMRegister dst, XMMRegister src1,
movlps(dst, src2);
}
}
void SharedTurboAssembler::Blendvpd(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister mask) {
void SharedMacroAssemblerBase::Blendvpd(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister mask) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vblendvpd(dst, src1, src2, mask);
@ -115,8 +115,8 @@ void SharedTurboAssembler::Blendvpd(XMMRegister dst, XMMRegister src1,
}
}
void SharedTurboAssembler::Blendvps(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister mask) {
void SharedMacroAssemblerBase::Blendvps(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister mask) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vblendvps(dst, src1, src2, mask);
@ -128,8 +128,8 @@ void SharedTurboAssembler::Blendvps(XMMRegister dst, XMMRegister src1,
}
}
void SharedTurboAssembler::Pblendvb(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister mask) {
void SharedMacroAssemblerBase::Pblendvb(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister mask) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vpblendvb(dst, src1, src2, mask);
@ -141,8 +141,8 @@ void SharedTurboAssembler::Pblendvb(XMMRegister dst, XMMRegister src1,
}
}
void SharedTurboAssembler::Shufps(XMMRegister dst, XMMRegister src1,
XMMRegister src2, uint8_t imm8) {
void SharedMacroAssemblerBase::Shufps(XMMRegister dst, XMMRegister src1,
XMMRegister src2, uint8_t imm8) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vshufps(dst, src1, src2, imm8);
@ -154,8 +154,8 @@ void SharedTurboAssembler::Shufps(XMMRegister dst, XMMRegister src1,
}
}
void SharedTurboAssembler::F64x2ExtractLane(DoubleRegister dst, XMMRegister src,
uint8_t lane) {
void SharedMacroAssemblerBase::F64x2ExtractLane(DoubleRegister dst,
XMMRegister src, uint8_t lane) {
ASM_CODE_COMMENT(this);
if (lane == 0) {
if (dst != src) {
@ -173,8 +173,10 @@ void SharedTurboAssembler::F64x2ExtractLane(DoubleRegister dst, XMMRegister src,
}
}
void SharedTurboAssembler::F64x2ReplaceLane(XMMRegister dst, XMMRegister src,
DoubleRegister rep, uint8_t lane) {
void SharedMacroAssemblerBase::F64x2ReplaceLane(XMMRegister dst,
XMMRegister src,
DoubleRegister rep,
uint8_t lane) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@ -197,8 +199,8 @@ void SharedTurboAssembler::F64x2ReplaceLane(XMMRegister dst, XMMRegister src,
}
}
void SharedTurboAssembler::F32x4Min(XMMRegister dst, XMMRegister lhs,
XMMRegister rhs, XMMRegister scratch) {
void SharedMacroAssemblerBase::F32x4Min(XMMRegister dst, XMMRegister lhs,
XMMRegister rhs, XMMRegister scratch) {
ASM_CODE_COMMENT(this);
// The minps instruction doesn't propagate NaNs and +0's in its first
// operand. Perform minps in both orders, merge the results, and adjust.
@ -226,8 +228,8 @@ void SharedTurboAssembler::F32x4Min(XMMRegister dst, XMMRegister lhs,
Andnps(dst, dst, scratch);
}
void SharedTurboAssembler::F32x4Max(XMMRegister dst, XMMRegister lhs,
XMMRegister rhs, XMMRegister scratch) {
void SharedMacroAssemblerBase::F32x4Max(XMMRegister dst, XMMRegister lhs,
XMMRegister rhs, XMMRegister scratch) {
ASM_CODE_COMMENT(this);
// The maxps instruction doesn't propagate NaNs and +0's in its first
// operand. Perform maxps in both orders, merge the results, and adjust.
@ -258,8 +260,8 @@ void SharedTurboAssembler::F32x4Max(XMMRegister dst, XMMRegister lhs,
Andnps(dst, dst, scratch);
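An aside on the two min/max comments above, as a scalar model of one f32 lane (an illustration under stated assumptions, not the V8 code): x86 minps selects per the rule src1 < src2 ? src1 : src2, so the second operand wins whenever the compare is unordered (NaN) or when -0.0 and +0.0 compare equal. Running the instruction in both operand orders and merging the results recovers wasm's semantics:

#include <cmath>
#include <cstdint>
#include <cstring>

// Hypothetical scalar stand-in for one minps lane: src2 wins on NaN and ties.
float minps_lane(float src1, float src2) { return src1 < src2 ? src1 : src2; }

float wasm_f32_min_lane(float a, float b) {
  // The assembler quiets NaNs with cmpunordps plus an or/srl/andn sequence;
  // this model simply tests for them explicitly.
  if (std::isnan(a) || std::isnan(b)) return std::nanf("");
  float x = minps_lane(a, b);
  float y = minps_lane(b, a);
  // Merge by OR-ing the bit patterns: for ordinary inputs x == y, and for
  // -0.0 vs +0.0 the sign bit survives, so -0.0 is the minimum as required.
  // (A max variant merges the other way around, so that +0.0 wins instead.)
  uint32_t xb, yb;
  std::memcpy(&xb, &x, sizeof xb);
  std::memcpy(&yb, &y, sizeof yb);
  uint32_t merged = xb | yb;
  float result;
  std::memcpy(&result, &merged, sizeof result);
  return result;
}

int main() {
  // -0.0 is the wasm minimum of (-0.0, +0.0); a single minps order gives +0.0.
  return std::signbit(wasm_f32_min_lane(-0.0f, 0.0f)) ? 0 : 1;
}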
}
void SharedTurboAssembler::F64x2Min(XMMRegister dst, XMMRegister lhs,
XMMRegister rhs, XMMRegister scratch) {
void SharedMacroAssemblerBase::F64x2Min(XMMRegister dst, XMMRegister lhs,
XMMRegister rhs, XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@ -296,8 +298,8 @@ void SharedTurboAssembler::F64x2Min(XMMRegister dst, XMMRegister lhs,
}
}
void SharedTurboAssembler::F64x2Max(XMMRegister dst, XMMRegister lhs,
XMMRegister rhs, XMMRegister scratch) {
void SharedMacroAssemblerBase::F64x2Max(XMMRegister dst, XMMRegister lhs,
XMMRegister rhs, XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@ -336,7 +338,7 @@ void SharedTurboAssembler::F64x2Max(XMMRegister dst, XMMRegister lhs,
}
}
void SharedTurboAssembler::F32x4Splat(XMMRegister dst, DoubleRegister src) {
void SharedMacroAssemblerBase::F32x4Splat(XMMRegister dst, DoubleRegister src) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX2)) {
CpuFeatureScope avx2_scope(this, AVX2);
@ -354,8 +356,8 @@ void SharedTurboAssembler::F32x4Splat(XMMRegister dst, DoubleRegister src) {
}
}
void SharedTurboAssembler::F32x4ExtractLane(FloatRegister dst, XMMRegister src,
uint8_t lane) {
void SharedMacroAssemblerBase::F32x4ExtractLane(FloatRegister dst,
XMMRegister src, uint8_t lane) {
ASM_CODE_COMMENT(this);
DCHECK_LT(lane, 4);
// These instructions are shorter than insertps, but will leave junk in
@ -376,8 +378,8 @@ void SharedTurboAssembler::F32x4ExtractLane(FloatRegister dst, XMMRegister src,
}
}
void SharedTurboAssembler::S128Store32Lane(Operand dst, XMMRegister src,
uint8_t laneidx) {
void SharedMacroAssemblerBase::S128Store32Lane(Operand dst, XMMRegister src,
uint8_t laneidx) {
ASM_CODE_COMMENT(this);
if (laneidx == 0) {
Movss(dst, src);
@ -388,8 +390,8 @@ void SharedTurboAssembler::S128Store32Lane(Operand dst, XMMRegister src,
}
template <typename Op>
void SharedTurboAssembler::I8x16SplatPreAvx2(XMMRegister dst, Op src,
XMMRegister scratch) {
void SharedMacroAssemblerBase::I8x16SplatPreAvx2(XMMRegister dst, Op src,
XMMRegister scratch) {
ASM_CODE_COMMENT(this);
DCHECK(!CpuFeatures::IsSupported(AVX2));
CpuFeatureScope ssse3_scope(this, SSSE3);
@ -398,8 +400,8 @@ void SharedTurboAssembler::I8x16SplatPreAvx2(XMMRegister dst, Op src,
Pshufb(dst, scratch);
}
void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Register src,
XMMRegister scratch) {
void SharedMacroAssemblerBase::I8x16Splat(XMMRegister dst, Register src,
XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX2)) {
CpuFeatureScope avx2_scope(this, AVX2);
@ -410,8 +412,8 @@ void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Register src,
}
}
void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Operand src,
XMMRegister scratch) {
void SharedMacroAssemblerBase::I8x16Splat(XMMRegister dst, Operand src,
XMMRegister scratch) {
ASM_CODE_COMMENT(this);
DCHECK_OPERAND_IS_NOT_REG(src);
if (CpuFeatures::IsSupported(AVX2)) {
@ -422,9 +424,9 @@ void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Operand src,
}
}
void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
uint8_t src2, Register tmp1,
XMMRegister tmp2) {
void SharedMacroAssemblerBase::I8x16Shl(XMMRegister dst, XMMRegister src1,
uint8_t src2, Register tmp1,
XMMRegister tmp2) {
ASM_CODE_COMMENT(this);
DCHECK_NE(dst, tmp2);
// Perform 16-bit shift, then mask away low bits.
@ -444,9 +446,9 @@ void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
Pand(dst, tmp2);
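A scalar sketch of the shift-then-mask trick named in the comment above (illustrative, not the V8 code): SSE has no 8-bit shift, so the containing 16-bit lane is shifted and the bits that bled across the byte boundary are masked off:

#include <cassert>
#include <cstdint>

// One 16-bit lane holding two packed bytes. psllw shifts the whole lane, so
// the high byte picks up bits from the low byte; AND-ing each byte with
// 0xFF << shift clears exactly those bits (a true 8-bit shl zeroes them too).
uint16_t i8x2_shl(uint16_t lane, unsigned shift) {
  uint16_t shifted = static_cast<uint16_t>(lane << shift);   // psllw
  uint8_t byte_mask = static_cast<uint8_t>(0xFFu << shift);
  uint16_t mask = static_cast<uint16_t>(byte_mask | (byte_mask << 8));
  return shifted & mask;                                     // pand
}

int main() {
  for (unsigned s = 0; s < 8; ++s) {
    uint16_t lane = 0xCDAB;  // bytes 0xAB (low) and 0xCD (high)
    uint16_t expect = static_cast<uint16_t>(
        static_cast<uint8_t>(0xAB << s) |
        (static_cast<uint8_t>(0xCD << s) << 8));
    assert(i8x2_shl(lane, s) == expect);
  }
}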
}
void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
Register src2, Register tmp1,
XMMRegister tmp2, XMMRegister tmp3) {
void SharedMacroAssemblerBase::I8x16Shl(XMMRegister dst, XMMRegister src1,
Register src2, Register tmp1,
XMMRegister tmp2, XMMRegister tmp3) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(dst, tmp2, tmp3));
DCHECK(!AreAliased(src1, tmp2, tmp3));
@ -471,8 +473,8 @@ void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
Psllw(dst, dst, tmp3);
}
void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
uint8_t src2, XMMRegister tmp) {
void SharedMacroAssemblerBase::I8x16ShrS(XMMRegister dst, XMMRegister src1,
uint8_t src2, XMMRegister tmp) {
ASM_CODE_COMMENT(this);
// Unpack bytes into words, do word (16-bit) shifts, and repack.
DCHECK_NE(dst, tmp);
@ -485,9 +487,9 @@ void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
Packsswb(dst, tmp);
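The unpack/shift/repack route from the comment above, reduced to one lane (a sketch; the vector code uses punpckl/hbw plus psraw plus packsswb):

#include <cassert>
#include <cstdint>

// Arithmetic 8-bit right shift built from 16-bit pieces: sign-extend the byte
// into a word (the unpack step), shift arithmetically (psraw), and narrow
// back. packsswb saturates, but an arithmetic right shift of an int8 always
// fits in int8 again, so the pack is lossless here.
int8_t i8_shr_s(int8_t v, unsigned shift) {
  int16_t wide = v;                                  // unpack: sign-extend
  wide = static_cast<int16_t>(wide >> (shift & 7));  // psraw
  return static_cast<int8_t>(wide);                  // packsswb
}

int main() {
  assert(i8_shr_s(-128, 3) == -16);
  assert(i8_shr_s(96, 5) == 3);
}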
}
void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
Register src2, Register tmp1,
XMMRegister tmp2, XMMRegister tmp3) {
void SharedMacroAssemblerBase::I8x16ShrS(XMMRegister dst, XMMRegister src1,
Register src2, Register tmp1,
XMMRegister tmp2, XMMRegister tmp3) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(dst, tmp2, tmp3));
DCHECK_NE(src1, tmp2);
@ -506,9 +508,9 @@ void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
Packsswb(dst, tmp2);
}
void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
uint8_t src2, Register tmp1,
XMMRegister tmp2) {
void SharedMacroAssemblerBase::I8x16ShrU(XMMRegister dst, XMMRegister src1,
uint8_t src2, Register tmp1,
XMMRegister tmp2) {
ASM_CODE_COMMENT(this);
DCHECK_NE(dst, tmp2);
if (!CpuFeatures::IsSupported(AVX) && (dst != src1)) {
@ -528,9 +530,9 @@ void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
Pand(dst, tmp2);
}
void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
Register src2, Register tmp1,
XMMRegister tmp2, XMMRegister tmp3) {
void SharedMacroAssemblerBase::I8x16ShrU(XMMRegister dst, XMMRegister src1,
Register src2, Register tmp1,
XMMRegister tmp2, XMMRegister tmp3) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(dst, tmp2, tmp3));
DCHECK_NE(src1, tmp2);
@ -550,14 +552,14 @@ void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
}
template <typename Op>
void SharedTurboAssembler::I16x8SplatPreAvx2(XMMRegister dst, Op src) {
void SharedMacroAssemblerBase::I16x8SplatPreAvx2(XMMRegister dst, Op src) {
DCHECK(!CpuFeatures::IsSupported(AVX2));
Movd(dst, src);
Pshuflw(dst, dst, uint8_t{0x0});
Punpcklqdq(dst, dst);
}
void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Register src) {
void SharedMacroAssemblerBase::I16x8Splat(XMMRegister dst, Register src) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX2)) {
CpuFeatureScope avx2_scope(this, AVX2);
@ -568,7 +570,7 @@ void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Register src) {
}
}
void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Operand src) {
void SharedMacroAssemblerBase::I16x8Splat(XMMRegister dst, Operand src) {
ASM_CODE_COMMENT(this);
DCHECK_OPERAND_IS_NOT_REG(src);
if (CpuFeatures::IsSupported(AVX2)) {
@ -579,18 +581,20 @@ void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Operand src) {
}
}
void SharedTurboAssembler::I16x8ExtMulLow(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister scratch,
bool is_signed) {
void SharedMacroAssemblerBase::I16x8ExtMulLow(XMMRegister dst, XMMRegister src1,
XMMRegister src2,
XMMRegister scratch,
bool is_signed) {
ASM_CODE_COMMENT(this);
is_signed ? Pmovsxbw(scratch, src1) : Pmovzxbw(scratch, src1);
is_signed ? Pmovsxbw(dst, src2) : Pmovzxbw(dst, src2);
Pmullw(dst, scratch);
}
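One lane of the three instructions above, in scalar form (a model that makes the signedness handling explicit, not the V8 code): pmovsxbw/pmovzxbw widen the bytes, pmullw multiplies in 16-bit lanes, and the product of two widened bytes always fits in 16 bits:

#include <cassert>
#include <cstdint>

uint16_t extmul_low_8x8(uint8_t a, uint8_t b, bool is_signed) {
  int32_t wa = is_signed ? static_cast<int8_t>(a) : a;  // pmovsxbw / pmovzxbw
  int32_t wb = is_signed ? static_cast<int8_t>(b) : b;
  return static_cast<uint16_t>(wa * wb);                // pmullw (low 16 bits)
}

int main() {
  assert(extmul_low_8x8(0xFF, 2, /*is_signed=*/true) == 0xFFFE);  // -1 * 2 = -2
  assert(extmul_low_8x8(0xFF, 2, /*is_signed=*/false) == 510);    // 255 * 2
}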
void SharedTurboAssembler::I16x8ExtMulHighS(XMMRegister dst, XMMRegister src1,
XMMRegister src2,
XMMRegister scratch) {
void SharedMacroAssemblerBase::I16x8ExtMulHighS(XMMRegister dst,
XMMRegister src1,
XMMRegister src2,
XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@ -612,9 +616,10 @@ void SharedTurboAssembler::I16x8ExtMulHighS(XMMRegister dst, XMMRegister src1,
}
}
void SharedTurboAssembler::I16x8ExtMulHighU(XMMRegister dst, XMMRegister src1,
XMMRegister src2,
XMMRegister scratch) {
void SharedMacroAssemblerBase::I16x8ExtMulHighU(XMMRegister dst,
XMMRegister src1,
XMMRegister src2,
XMMRegister scratch) {
ASM_CODE_COMMENT(this);
// The logic here is slightly complicated to handle all the cases of register
// aliasing. This allows flexibility for callers in TurboFan and Liftoff.
@ -662,8 +667,8 @@ void SharedTurboAssembler::I16x8ExtMulHighU(XMMRegister dst, XMMRegister src1,
}
}
void SharedTurboAssembler::I16x8SConvertI8x16High(XMMRegister dst,
XMMRegister src) {
void SharedMacroAssemblerBase::I16x8SConvertI8x16High(XMMRegister dst,
XMMRegister src) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@ -685,9 +690,9 @@ void SharedTurboAssembler::I16x8SConvertI8x16High(XMMRegister dst,
}
}
void SharedTurboAssembler::I16x8UConvertI8x16High(XMMRegister dst,
XMMRegister src,
XMMRegister scratch) {
void SharedMacroAssemblerBase::I16x8UConvertI8x16High(XMMRegister dst,
XMMRegister src,
XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@ -711,9 +716,10 @@ void SharedTurboAssembler::I16x8UConvertI8x16High(XMMRegister dst,
}
}
void SharedTurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
XMMRegister src2,
XMMRegister scratch) {
void SharedMacroAssemblerBase::I16x8Q15MulRSatS(XMMRegister dst,
XMMRegister src1,
XMMRegister src2,
XMMRegister scratch) {
ASM_CODE_COMMENT(this);
// k = i16x8.splat(0x8000)
Pcmpeqd(scratch, scratch);
@ -729,9 +735,9 @@ void SharedTurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
Pxor(dst, scratch);
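For reference, the lane arithmetic being implemented here (a scalar sketch of wasm's i16x8.q15mulr_sat_s): multiply two Q15 fixed-point values, round, and saturate. pmulhrsw computes exactly this, except that INT16_MIN * INT16_MIN wraps to -0x8000; the splatted 0x8000 constant above feeds the xor fixup for that single case.

#include <cassert>
#include <cstdint>

int16_t q15mulr_sat_s(int16_t a, int16_t b) {
  int32_t product = (static_cast<int32_t>(a) * b + 0x4000) >> 15;  // pmulhrsw
  if (product > INT16_MAX) product = INT16_MAX;  // only INT16_MIN^2 overflows
  return static_cast<int16_t>(product);
}

int main() {
  assert(q15mulr_sat_s(16384, 16384) == 8192);     // 0.5 * 0.5 == 0.25 in Q15
  assert(q15mulr_sat_s(-32768, -32768) == 32767);  // saturated, not -0x8000
}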
}
void SharedTurboAssembler::I16x8DotI8x16I7x16S(XMMRegister dst,
XMMRegister src1,
XMMRegister src2) {
void SharedMacroAssemblerBase::I16x8DotI8x16I7x16S(XMMRegister dst,
XMMRegister src1,
XMMRegister src2) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@ -744,7 +750,7 @@ void SharedTurboAssembler::I16x8DotI8x16I7x16S(XMMRegister dst,
}
}
void SharedTurboAssembler::I32x4DotI8x16I7x16AddS(
void SharedMacroAssemblerBase::I32x4DotI8x16I7x16AddS(
XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister src3,
XMMRegister scratch, XMMRegister splat_reg) {
ASM_CODE_COMMENT(this);
@ -768,9 +774,9 @@ void SharedTurboAssembler::I32x4DotI8x16I7x16AddS(
}
}
void SharedTurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst,
XMMRegister src,
XMMRegister tmp) {
void SharedMacroAssemblerBase::I32x4ExtAddPairwiseI16x8U(XMMRegister dst,
XMMRegister src,
XMMRegister tmp) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@ -812,9 +818,10 @@ void SharedTurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst,
// 1. Multiply low word into scratch.
// 2. Multiply high word (can be signed or unsigned) into dst.
// 3. Unpack and interleave scratch and dst into dst.
void SharedTurboAssembler::I32x4ExtMul(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister scratch,
bool low, bool is_signed) {
void SharedMacroAssemblerBase::I32x4ExtMul(XMMRegister dst, XMMRegister src1,
XMMRegister src2,
XMMRegister scratch, bool low,
bool is_signed) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@ -830,8 +837,8 @@ void SharedTurboAssembler::I32x4ExtMul(XMMRegister dst, XMMRegister src1,
}
}
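Scalar arithmetic behind the numbered steps above (a sketch that abstracts away the lane interleaving): pmullw supplies the low 16 bits of each 16x16 product, pmulhw or pmulhuw the high 16 bits, and the unpack step glues the halves into a 32-bit lane:

#include <cassert>
#include <cstdint>

uint32_t extmul_16x16(uint16_t a, uint16_t b, bool is_signed) {
  uint32_t full = is_signed
      ? static_cast<uint32_t>(static_cast<int32_t>(static_cast<int16_t>(a)) *
                              static_cast<int16_t>(b))
      : static_cast<uint32_t>(a) * b;
  uint16_t lo = static_cast<uint16_t>(full);        // step 1: pmullw
  uint16_t hi = static_cast<uint16_t>(full >> 16);  // step 2: pmulhw/pmulhuw
  return (static_cast<uint32_t>(hi) << 16) | lo;    // step 3: punpck interleave
}

int main() {
  assert(extmul_16x16(0xFFFF, 3, /*is_signed=*/true) == 0xFFFFFFFDu);   // -1 * 3
  assert(extmul_16x16(0xFFFF, 3, /*is_signed=*/false) == 0x0002FFFDu);  // 65535 * 3
}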
void SharedTurboAssembler::I32x4SConvertI16x8High(XMMRegister dst,
XMMRegister src) {
void SharedMacroAssemblerBase::I32x4SConvertI16x8High(XMMRegister dst,
XMMRegister src) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@ -853,9 +860,9 @@ void SharedTurboAssembler::I32x4SConvertI16x8High(XMMRegister dst,
}
}
void SharedTurboAssembler::I32x4UConvertI16x8High(XMMRegister dst,
XMMRegister src,
XMMRegister scratch) {
void SharedMacroAssemblerBase::I32x4UConvertI16x8High(XMMRegister dst,
XMMRegister src,
XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@ -879,8 +886,8 @@ void SharedTurboAssembler::I32x4UConvertI16x8High(XMMRegister dst,
}
}
void SharedTurboAssembler::I64x2Neg(XMMRegister dst, XMMRegister src,
XMMRegister scratch) {
void SharedMacroAssemblerBase::I64x2Neg(XMMRegister dst, XMMRegister src,
XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@ -896,8 +903,8 @@ void SharedTurboAssembler::I64x2Neg(XMMRegister dst, XMMRegister src,
}
}
void SharedTurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src,
XMMRegister scratch) {
void SharedMacroAssemblerBase::I64x2Abs(XMMRegister dst, XMMRegister src,
XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@ -917,8 +924,8 @@ void SharedTurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src,
}
}
void SharedTurboAssembler::I64x2GtS(XMMRegister dst, XMMRegister src0,
XMMRegister src1, XMMRegister scratch) {
void SharedMacroAssemblerBase::I64x2GtS(XMMRegister dst, XMMRegister src0,
XMMRegister src1, XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@ -951,8 +958,8 @@ void SharedTurboAssembler::I64x2GtS(XMMRegister dst, XMMRegister src0,
}
}
void SharedTurboAssembler::I64x2GeS(XMMRegister dst, XMMRegister src0,
XMMRegister src1, XMMRegister scratch) {
void SharedMacroAssemblerBase::I64x2GeS(XMMRegister dst, XMMRegister src0,
XMMRegister src1, XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@ -986,8 +993,8 @@ void SharedTurboAssembler::I64x2GeS(XMMRegister dst, XMMRegister src0,
}
}
void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
uint8_t shift, XMMRegister xmm_tmp) {
void SharedMacroAssemblerBase::I64x2ShrS(XMMRegister dst, XMMRegister src,
uint8_t shift, XMMRegister xmm_tmp) {
ASM_CODE_COMMENT(this);
DCHECK_GT(64, shift);
DCHECK_NE(xmm_tmp, dst);
@ -1019,10 +1026,10 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
Psubq(dst, xmm_tmp);
}
void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
Register shift, XMMRegister xmm_tmp,
XMMRegister xmm_shift,
Register tmp_shift) {
void SharedMacroAssemblerBase::I64x2ShrS(XMMRegister dst, XMMRegister src,
Register shift, XMMRegister xmm_tmp,
XMMRegister xmm_shift,
Register tmp_shift) {
ASM_CODE_COMMENT(this);
DCHECK_NE(xmm_tmp, dst);
DCHECK_NE(xmm_tmp, src);
@ -1049,9 +1056,9 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
Psubq(dst, xmm_tmp);
}
void SharedTurboAssembler::I64x2Mul(XMMRegister dst, XMMRegister lhs,
XMMRegister rhs, XMMRegister tmp1,
XMMRegister tmp2) {
void SharedMacroAssemblerBase::I64x2Mul(XMMRegister dst, XMMRegister lhs,
XMMRegister rhs, XMMRegister tmp1,
XMMRegister tmp2) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(dst, tmp1, tmp2));
DCHECK(!AreAliased(lhs, tmp1, tmp2));
@ -1099,9 +1106,10 @@ void SharedTurboAssembler::I64x2Mul(XMMRegister dst, XMMRegister lhs,
// 2. Unpack src1, src0 into even-number elements of dst.
// 3. Multiply 1. with 2.
// For non-AVX, use non-destructive pshufd instead of punpckldq/punpckhdq.
void SharedTurboAssembler::I64x2ExtMul(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister scratch,
bool low, bool is_signed) {
void SharedMacroAssemblerBase::I64x2ExtMul(XMMRegister dst, XMMRegister src1,
XMMRegister src2,
XMMRegister scratch, bool low,
bool is_signed) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@ -1130,8 +1138,8 @@ void SharedTurboAssembler::I64x2ExtMul(XMMRegister dst, XMMRegister src1,
}
}
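The 64-bit analogue, again as one scalar lane (a sketch): once pshufd/punpck have placed the chosen 32-bit elements in the even slots, pmuludq yields full 64-bit products; the signed variant is the same widening multiply carried out in int64:

#include <cassert>
#include <cstdint>

uint64_t extmul_32x32(uint32_t a, uint32_t b, bool is_signed) {
  return is_signed
      ? static_cast<uint64_t>(static_cast<int64_t>(static_cast<int32_t>(a)) *
                              static_cast<int32_t>(b))
      : static_cast<uint64_t>(a) * b;  // pmuludq
}

int main() {
  assert(extmul_32x32(0xFFFFFFFFu, 2, /*is_signed=*/true) ==
         0xFFFFFFFFFFFFFFFEull);                                  // -1 * 2
  assert(extmul_32x32(0xFFFFFFFFu, 2, /*is_signed=*/false) == 0x1FFFFFFFEull);
}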
void SharedTurboAssembler::I64x2SConvertI32x4High(XMMRegister dst,
XMMRegister src) {
void SharedMacroAssemblerBase::I64x2SConvertI32x4High(XMMRegister dst,
XMMRegister src) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@ -1148,9 +1156,9 @@ void SharedTurboAssembler::I64x2SConvertI32x4High(XMMRegister dst,
}
}
void SharedTurboAssembler::I64x2UConvertI32x4High(XMMRegister dst,
XMMRegister src,
XMMRegister scratch) {
void SharedMacroAssemblerBase::I64x2UConvertI32x4High(XMMRegister dst,
XMMRegister src,
XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@ -1170,8 +1178,8 @@ void SharedTurboAssembler::I64x2UConvertI32x4High(XMMRegister dst,
}
}
void SharedTurboAssembler::S128Not(XMMRegister dst, XMMRegister src,
XMMRegister scratch) {
void SharedMacroAssemblerBase::S128Not(XMMRegister dst, XMMRegister src,
XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (dst == src) {
Pcmpeqd(scratch, scratch);
@ -1182,9 +1190,9 @@ void SharedTurboAssembler::S128Not(XMMRegister dst, XMMRegister src,
}
}
void SharedTurboAssembler::S128Select(XMMRegister dst, XMMRegister mask,
XMMRegister src1, XMMRegister src2,
XMMRegister scratch) {
void SharedMacroAssemblerBase::S128Select(XMMRegister dst, XMMRegister mask,
XMMRegister src1, XMMRegister src2,
XMMRegister scratch) {
ASM_CODE_COMMENT(this);
// v128.select = v128.or(v128.and(v1, c), v128.andnot(v2, c)).
// pandn(x, y) = !x & y, so we have to flip the mask and input.
@ -1203,8 +1211,8 @@ void SharedTurboAssembler::S128Select(XMMRegister dst, XMMRegister mask,
}
}
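The identity in the comment, checked on one scalar lane (pandn computes ~x & y, which is why the mask has to land in the destination operand first):

#include <cassert>
#include <cstdint>

// v128.select = (mask & v1) | (~mask & v2), purely bitwise, lane-agnostic.
uint64_t s128_select(uint64_t mask, uint64_t v1, uint64_t v2) {
  return (mask & v1) | (~mask & v2);
}

int main() {
  assert(s128_select(0xFF00, 0xAAAA, 0x5555) == 0xAA55);
}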
void SharedTurboAssembler::S128Load8Splat(XMMRegister dst, Operand src,
XMMRegister scratch) {
void SharedMacroAssemblerBase::S128Load8Splat(XMMRegister dst, Operand src,
XMMRegister scratch) {
ASM_CODE_COMMENT(this);
// The trap handler uses the current pc to create a landing pad, so that it can
// determine if a trap occurred in Wasm code due to an OOB load. Make sure the
@ -1226,8 +1234,8 @@ void SharedTurboAssembler::S128Load8Splat(XMMRegister dst, Operand src,
}
}
void SharedTurboAssembler::S128Load16Splat(XMMRegister dst, Operand src,
XMMRegister scratch) {
void SharedMacroAssemblerBase::S128Load16Splat(XMMRegister dst, Operand src,
XMMRegister scratch) {
ASM_CODE_COMMENT(this);
// The trap handler uses the current pc to create a landing pad, so that it can
// determine if a trap occurred in Wasm code due to an OOB load. Make sure the
@ -1248,7 +1256,7 @@ void SharedTurboAssembler::S128Load16Splat(XMMRegister dst, Operand src,
}
}
void SharedTurboAssembler::S128Load32Splat(XMMRegister dst, Operand src) {
void SharedMacroAssemblerBase::S128Load32Splat(XMMRegister dst, Operand src) {
ASM_CODE_COMMENT(this);
// The trap handler uses the current pc to create a landing pad, so that it can
// determine if a trap occurred in Wasm code due to an OOB load. Make sure the
@ -1262,8 +1270,8 @@ void SharedTurboAssembler::S128Load32Splat(XMMRegister dst, Operand src) {
}
}
void SharedTurboAssembler::S128Store64Lane(Operand dst, XMMRegister src,
uint8_t laneidx) {
void SharedMacroAssemblerBase::S128Store64Lane(Operand dst, XMMRegister src,
uint8_t laneidx) {
ASM_CODE_COMMENT(this);
if (laneidx == 0) {
Movlps(dst, src);
@ -1342,27 +1350,27 @@ void SharedTurboAssembler::S128Store64Lane(Operand dst, XMMRegister src,
sub##ps_or_pd(dst, tmp); \
}
void SharedTurboAssembler::F32x4Qfma(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister src3,
XMMRegister tmp) {
void SharedMacroAssemblerBase::F32x4Qfma(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister src3,
XMMRegister tmp) {
QFMA(ps)
}
void SharedTurboAssembler::F32x4Qfms(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister src3,
XMMRegister tmp) {
void SharedMacroAssemblerBase::F32x4Qfms(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister src3,
XMMRegister tmp) {
QFMS(ps)
}
void SharedTurboAssembler::F64x2Qfma(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister src3,
XMMRegister tmp) {
void SharedMacroAssemblerBase::F64x2Qfma(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister src3,
XMMRegister tmp) {
QFMA(pd);
}
void SharedTurboAssembler::F64x2Qfms(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister src3,
XMMRegister tmp) {
void SharedMacroAssemblerBase::F64x2Qfms(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister src3,
XMMRegister tmp) {
QFMS(pd);
}

View File

@ -8,7 +8,7 @@
#include "src/base/macros.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/external-reference.h"
#include "src/codegen/turbo-assembler.h"
#include "src/codegen/macro-assembler-base.h"
#if V8_TARGET_ARCH_IA32
#include "src/codegen/ia32/register-ia32.h"
@ -30,15 +30,15 @@ constexpr int kStackSavedSavedFPSize = 2 * kDoubleSize;
constexpr int kStackSavedSavedFPSize = kDoubleSize;
#endif // V8_ENABLE_WEBASSEMBLY
// Base class for SharedTurboAssemblerBase. This class contains macro-assembler
// Base class for SharedMacroAssembler. This class contains macro-assembler
// functions that can be shared across ia32 and x64 without any template
// machinery, i.e. does not require the CRTP pattern that
// SharedTurboAssemblerBase exposes. This allows us to keep the bulk of
// SharedMacroAssembler exposes. This allows us to keep the bulk of
// definition inside a separate source file, rather than putting everything
// inside this header.
class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
class V8_EXPORT_PRIVATE SharedMacroAssemblerBase : public MacroAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
using MacroAssemblerBase::MacroAssemblerBase;
void Move(Register dst, uint32_t src);
// Move if registers are not identical.
@ -530,41 +530,41 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
void I16x8SplatPreAvx2(XMMRegister dst, Op src);
};
// Common base class template shared by ia32 and x64 TurboAssembler. This uses
// Common base class template shared by ia32 and x64 MacroAssembler. This uses
// the Curiously Recurring Template Pattern (CRTP), where Impl is the actual
// class (subclass of SharedTurboAssemblerBase instantiated with the actual
// class (subclass of SharedMacroAssembler instantiated with the actual
class). This allows static polymorphism, where member functions can be moved
// into SharedTurboAssembler, and we can also call into member functions
// defined in ia32 or x64 specific TurboAssembler from within this template
// into SharedMacroAssemblerBase, and we can also call into member functions
// defined in ia32 or x64 specific MacroAssembler from within this template
// class, via Impl.
//
// Note: all member functions must be defined in this header file so that the
// compiler can generate code for the function definitions. See
// https://isocpp.org/wiki/faq/templates#templates-defn-vs-decl for rationale.
// If a function does not need polymorphism, move it into SharedTurboAssembler,
// and define it outside of this header.
// If a function does not need polymorphism, move it into
// SharedMacroAssemblerBase, and define it outside of this header.
template <typename Impl>
class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler {
using SharedTurboAssembler::SharedTurboAssembler;
class V8_EXPORT_PRIVATE SharedMacroAssembler : public SharedMacroAssemblerBase {
using SharedMacroAssemblerBase::SharedMacroAssemblerBase;
public:
void Abspd(XMMRegister dst, XMMRegister src, Register tmp) {
FloatUnop(dst, src, tmp, &SharedTurboAssembler::Andps,
FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Andps,
ExternalReference::address_of_double_abs_constant());
}
void Absps(XMMRegister dst, XMMRegister src, Register tmp) {
FloatUnop(dst, src, tmp, &SharedTurboAssembler::Andps,
FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Andps,
ExternalReference::address_of_float_abs_constant());
}
void Negpd(XMMRegister dst, XMMRegister src, Register tmp) {
FloatUnop(dst, src, tmp, &SharedTurboAssembler::Xorps,
FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Xorps,
ExternalReference::address_of_double_neg_constant());
}
void Negps(XMMRegister dst, XMMRegister src, Register tmp) {
FloatUnop(dst, src, tmp, &SharedTurboAssembler::Xorps,
FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Xorps,
ExternalReference::address_of_float_neg_constant());
}
#undef FLOAT_UNOP
@ -975,15 +975,16 @@ class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler {
return impl()->ExternalReferenceAsOperand(reference, scratch);
}
using FloatInstruction = void (SharedTurboAssembler::*)(XMMRegister,
XMMRegister, Operand);
using FloatInstruction = void (SharedMacroAssemblerBase::*)(XMMRegister,
XMMRegister,
Operand);
void FloatUnop(XMMRegister dst, XMMRegister src, Register tmp,
FloatInstruction op, ExternalReference ext) {
if (!CpuFeatures::IsSupported(AVX) && (dst != src)) {
movaps(dst, src);
src = dst;
}
SharedTurboAssembler* assm = this;
SharedMacroAssemblerBase* assm = this;
(assm->*op)(dst, src, ExternalReferenceAsOperand(ext, tmp));
}
};
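For readers unfamiliar with the pattern described above, a minimal CRTP sketch (toy names, not the V8 classes): the base template reaches derived-class members through a static_cast, so shared helpers call platform-specific code with no virtual dispatch.

#include <cstdio>

template <typename Impl>
class SharedBase {
 public:
  // Shared helper that needs a platform-specific step; the call resolves at
  // compile time, mirroring impl()->ExternalReferenceAsOperand(...) above.
  void Emit() { impl()->EmitPlatform(); }

 private:
  Impl* impl() { return static_cast<Impl*>(this); }
};

class X64Assembler : public SharedBase<X64Assembler> {
 public:
  void EmitPlatform() { std::puts("x64-specific encoding"); }
};

int main() {
  X64Assembler masm;
  masm.Emit();  // prints "x64-specific encoding"; no vtable involved
}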

View File

@ -79,10 +79,10 @@ std::vector<SourcePositionInfo> SourcePosition::InliningStack(Isolate* isolate,
}
SourcePositionInfo SourcePosition::FirstInfo(Isolate* isolate,
Handle<Code> code) const {
Code code) const {
DisallowGarbageCollection no_gc;
DeoptimizationData deopt_data =
DeoptimizationData::cast(code->deoptimization_data());
DeoptimizationData::cast(code.deoptimization_data());
SourcePosition pos = *this;
if (pos.isInlined()) {
InliningPosition inl = deopt_data.InliningPositions().get(pos.InliningId());

View File

@ -83,7 +83,7 @@ class SourcePosition final {
Code code) const;
std::vector<SourcePositionInfo> InliningStack(
OptimizedCompilationInfo* cinfo) const;
SourcePositionInfo FirstInfo(Isolate* isolate, Handle<Code> code) const;
SourcePositionInfo FirstInfo(Isolate* isolate, Code code) const;
void Print(std::ostream& out, InstructionStream code) const;
void PrintJson(std::ostream& out) const;

View File

@ -283,8 +283,8 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
if (IsCompressedEmbeddedObject(rmode_)) {
Tagged_t compressed = ReadUnalignedValue<Tagged_t>(pc_);
DCHECK(!HAS_SMI_TAG(compressed));
Object obj(V8HeapCompressionScheme::DecompressTaggedPointer(cage_base,
compressed));
Object obj(
V8HeapCompressionScheme::DecompressTagged(cage_base, compressed));
// Embedding of compressed InstructionStream objects must not happen when
// external code space is enabled, because Codes must be used
// instead.

File diff suppressed because it is too large

View File

@ -55,10 +55,10 @@ class StackArgumentsAccessor {
DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
};
class V8_EXPORT_PRIVATE TurboAssembler
: public SharedTurboAssemblerBase<TurboAssembler> {
class V8_EXPORT_PRIVATE MacroAssembler
: public SharedMacroAssembler<MacroAssembler> {
public:
using SharedTurboAssemblerBase<TurboAssembler>::SharedTurboAssemblerBase;
using SharedMacroAssembler<MacroAssembler>::SharedMacroAssembler;
void PushReturnAddressFrom(Register src) { pushq(src); }
void PopReturnAddressTo(Register dst) { popq(dst); }
@ -583,35 +583,21 @@ class V8_EXPORT_PRIVATE TurboAssembler
// ---------------------------------------------------------------------------
// Pointer compression support
// Loads a field containing a HeapObject and decompresses it if pointer
// compression is enabled.
void LoadTaggedPointerField(Register destination, Operand field_operand);
// Loads a field containing any tagged value and decompresses it if necessary.
void LoadTaggedField(Register destination, Operand field_operand);
// Loads a field containing a HeapObject but does not decompress it when
// Loads a field containing any tagged value but does not decompress it when
// pointer compression is enabled.
void LoadTaggedPointerField(TaggedRegister destination,
Operand field_operand);
void LoadTaggedField(TaggedRegister destination, Operand field_operand);
// Loads a field containing a Smi and decompresses it if pointer compression
// is enabled.
void LoadTaggedSignedField(Register destination, Operand field_operand);
// Loads a field containing any tagged value and decompresses it if necessary.
void LoadAnyTaggedField(Register destination, Operand field_operand);
// Loads a field containing any tagged value but does not decompress it when
// pointer compression is enabled.
void LoadAnyTaggedField(TaggedRegister destination, Operand field_operand);
// Loads a field containing a HeapObject, decompresses it if necessary and
// pushes full pointer to the stack. When pointer compression is enabled,
// uses |scratch| to decompress the value.
void PushTaggedPointerField(Operand field_operand, Register scratch);
// Loads a field containing any tagged value, decompresses it if necessary and
// pushes the full pointer to the stack. When pointer compression is enabled,
// uses |scratch| to decompress the value.
void PushTaggedAnyField(Operand field_operand, Register scratch);
void PushTaggedField(Operand field_operand, Register scratch);
// Loads a field containing a smi value and untags it.
void SmiUntagField(Register dst, Operand src);
@ -626,10 +612,9 @@ class V8_EXPORT_PRIVATE TurboAssembler
// The following macros work even when pointer compression is not enabled.
void DecompressTaggedSigned(Register destination, Operand field_operand);
void DecompressTaggedPointer(Register destination, Operand field_operand);
void DecompressTaggedPointer(Register destination, Register source);
void DecompressTaggedPointer(Register destination, Tagged_t immediate);
void DecompressAnyTagged(Register destination, Operand field_operand);
void DecompressTagged(Register destination, Operand field_operand);
void DecompressTagged(Register destination, Register source);
void DecompressTagged(Register destination, Tagged_t immediate);
// ---------------------------------------------------------------------------
// V8 Sandbox support
@ -653,23 +638,6 @@ class V8_EXPORT_PRIVATE TurboAssembler
IsolateRootLocation isolateRootLocation =
IsolateRootLocation::kInRootRegister);
protected:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
// Returns a register holding the smi value. The register MUST NOT be
// modified. It may be the "smi 1 constant" register.
Register GetSmiConstant(Smi value);
// Drops arguments assuming that the return address was already popped.
void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger,
ArgumentsCountMode mode = kCountExcludesReceiver);
};
// MacroAssembler implements a collection of frequently used macros.
class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
using TurboAssembler::TurboAssembler;
// Loads and stores the value of an external reference.
// Special case code for load and store to take advantage of
// load_rax/store_rax if possible/necessary.
@ -781,7 +749,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Macro instructions.
using TurboAssembler::Cmp;
void Cmp(Register dst, Handle<Object> source);
void Cmp(Operand dst, Handle<Object> source);
@ -945,6 +912,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// In-place weak references.
void LoadWeakValue(Register in_out, Label* target_if_cleared);
protected:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
// Returns a register holding the smi value. The register MUST NOT be
// modified. It may be the "smi 1 constant" register.
Register GetSmiConstant(Smi value);
// Drops arguments assuming that the return address was already popped.
void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger,
ArgumentsCountMode mode = kCountExcludesReceiver);
private:
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,

View File

@ -2035,7 +2035,8 @@ enum IsolateAddressId {
V(TrapNullDereference) \
V(TrapIllegalCast) \
V(TrapArrayOutOfBounds) \
V(TrapArrayTooLarge)
V(TrapArrayTooLarge) \
V(TrapStringOffsetOutOfBounds)
enum KeyedAccessLoadMode {
STANDARD_LOAD,

View File

@ -69,8 +69,8 @@ Address V8HeapCompressionScheme::DecompressTaggedSigned(Tagged_t raw_value) {
// static
template <typename TOnHeapAddress>
Address V8HeapCompressionScheme::DecompressTaggedPointer(
TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
Address V8HeapCompressionScheme::DecompressTagged(TOnHeapAddress on_heap_addr,
Tagged_t raw_value) {
#if defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE) && \
!defined(V8_COMPRESS_POINTERS_DONT_USE_GLOBAL_BASE)
V8_ASSUME((base_ & kPtrComprCageBaseMask) == base_);
@ -79,19 +79,15 @@ Address V8HeapCompressionScheme::DecompressTaggedPointer(
// For V8_ASSUME_ALIGNED to be considered for optimizations the following
// addition has to happen on a pointer type.
Address result = reinterpret_cast<Address>(cage_base + raw_value);
V8_ASSUME(static_cast<uint32_t>(result) == raw_value);
return result;
#else
Address cage_base = GetPtrComprCageBaseAddress(on_heap_addr);
return cage_base + static_cast<Address>(raw_value);
Address result = cage_base + static_cast<Address>(raw_value);
#endif
}
// static
template <typename TOnHeapAddress>
Address V8HeapCompressionScheme::DecompressTaggedAny(
TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
return DecompressTaggedPointer(on_heap_addr, raw_value);
// Allows the compiler to remove compress(decompress(...)).
V8_ASSUME(static_cast<uint32_t>(result) == raw_value);
// Allows the compiler to remove SMI checks when the result is compared against a constant.
V8_ASSUME(HAS_SMI_TAG(result) == HAS_SMI_TAG(raw_value));
return result;
}
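A scalar sketch of the decompression arithmetic (assuming a 4GB-aligned cage base, which is what makes both V8_ASSUMEs above hold; this models the formula, not V8's actual types): the compressed word is the low 32 bits of the full pointer, and decompression adds it back onto the base. Since the base has zero low bits, the low 32 bits round-trip and the smi tag in bit 0 is preserved.

#include <cassert>
#include <cstdint>

uint32_t CompressTagged(uint64_t full_ptr) {
  return static_cast<uint32_t>(full_ptr);  // keep the low 32 bits
}

uint64_t DecompressTaggedModel(uint64_t cage_base, uint32_t raw_value) {
  uint64_t base = cage_base & ~uint64_t{0xFFFFFFFF};  // 4GB-aligned, assumed
  uint64_t result = base + raw_value;
  assert(static_cast<uint32_t>(result) == raw_value);  // first V8_ASSUME
  assert((result & 1) == (raw_value & 1));             // smi tag preserved
  return result;
}

int main() {
  uint64_t cage = uint64_t{0x12} << 32;
  uint64_t ptr = cage + 0x1000;  // a tagged pointer somewhere inside the cage
  assert(DecompressTaggedModel(cage, CompressTagged(ptr)) == ptr);
}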
// static
@ -102,10 +98,10 @@ void V8HeapCompressionScheme::ProcessIntermediatePointers(
// If pointer compression is enabled, we may have random compressed pointers
// on the stack that may be used for subsequent operations.
// Extract, decompress and trace both halfwords.
Address decompressed_low = V8HeapCompressionScheme::DecompressTaggedPointer(
Address decompressed_low = V8HeapCompressionScheme::DecompressTagged(
cage_base, static_cast<Tagged_t>(raw_value));
callback(decompressed_low);
Address decompressed_high = V8HeapCompressionScheme::DecompressTaggedPointer(
Address decompressed_high = V8HeapCompressionScheme::DecompressTagged(
cage_base,
static_cast<Tagged_t>(raw_value >> (sizeof(Tagged_t) * CHAR_BIT)));
callback(decompressed_high);
@ -162,7 +158,7 @@ Address ExternalCodeCompressionScheme::DecompressTaggedSigned(
// static
template <typename TOnHeapAddress>
Address ExternalCodeCompressionScheme::DecompressTaggedPointer(
Address ExternalCodeCompressionScheme::DecompressTagged(
TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
#if defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE) && \
!defined(V8_COMPRESS_POINTERS_DONT_USE_GLOBAL_BASE)
@ -172,19 +168,15 @@ Address ExternalCodeCompressionScheme::DecompressTaggedPointer(
// For V8_ASSUME_ALIGNED to be considered for optimizations the following
// addition has to happen on a pointer type.
Address result = reinterpret_cast<Address>(cage_base + raw_value);
V8_ASSUME(static_cast<uint32_t>(result) == raw_value);
return result;
#else
Address cage_base = GetPtrComprCageBaseAddress(on_heap_addr);
return cage_base + static_cast<Address>(raw_value);
Address result = cage_base + static_cast<Address>(raw_value);
#endif
}
// static
template <typename TOnHeapAddress>
Address ExternalCodeCompressionScheme::DecompressTaggedAny(
TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
return DecompressTaggedPointer(on_heap_addr, raw_value);
// Allows the compiler to remove compress(decompress(...)).
V8_ASSUME(static_cast<uint32_t>(result) == raw_value);
// Allows the compiler to remove SMI checks when the result is compared against a constant.
V8_ASSUME(HAS_SMI_TAG(result) == HAS_SMI_TAG(raw_value));
return result;
}
#endif // V8_EXTERNAL_CODE_SPACE
@ -223,15 +215,8 @@ Address V8HeapCompressionScheme::DecompressTaggedSigned(Tagged_t raw_value) {
// static
template <typename TOnHeapAddress>
Address V8HeapCompressionScheme::DecompressTaggedPointer(
TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
UNREACHABLE();
}
// static
template <typename TOnHeapAddress>
Address V8HeapCompressionScheme::DecompressTaggedAny(
TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
Address V8HeapCompressionScheme::DecompressTagged(TOnHeapAddress on_heap_addr,
Tagged_t raw_value) {
UNREACHABLE();
}

View File

@ -29,15 +29,10 @@ class V8HeapCompressionScheme {
// Decompresses smi value.
V8_INLINE static Address DecompressTaggedSigned(Tagged_t raw_value);
// Decompresses weak or strong heap object pointer or forwarding pointer,
// preserving both weak- and smi- tags.
template <typename TOnHeapAddress>
V8_INLINE static Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr,
Tagged_t raw_value);
// Decompresses any tagged value, preserving both weak- and smi- tags.
template <typename TOnHeapAddress>
V8_INLINE static Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
Tagged_t raw_value);
V8_INLINE static Address DecompressTagged(TOnHeapAddress on_heap_addr,
Tagged_t raw_value);
// Given a 64bit raw value, found on the stack, calls the callback function
// with all possible pointers that may be "contained" in compressed form in
@ -82,15 +77,10 @@ class ExternalCodeCompressionScheme {
// Decompresses smi value.
V8_INLINE static Address DecompressTaggedSigned(Tagged_t raw_value);
// Decompresses weak or strong heap object pointer or forwarding pointer,
// preserving both weak- and smi- tags.
template <typename TOnHeapAddress>
V8_INLINE static Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr,
Tagged_t raw_value);
// Decompresses any tagged value, preserving both weak- and smi- tags.
template <typename TOnHeapAddress>
V8_INLINE static Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
Tagged_t raw_value);
V8_INLINE static Address DecompressTagged(TOnHeapAddress on_heap_addr,
Tagged_t raw_value);
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
// Process-wide cage base value used for decompression.

View File

@ -29,7 +29,7 @@ namespace v8 {
namespace internal {
namespace compiler {
#define __ tasm()->
#define __ masm()->
// Adds Arm-specific methods to convert InstructionOperands.
class ArmOperandConverter final : public InstructionOperandConverter {
@ -415,7 +415,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
FrameScope scope(tasm(), StackFrame::MANUAL); \
FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
@ -429,7 +429,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
FrameScope scope(tasm(), StackFrame::MANUAL); \
FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 1); \
__ MovToFloatParameter(i.InputDoubleRegister(0)); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
@ -473,7 +473,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
if (instr->InputAt(1)->IsImmediate()) { \
__ asm_imm(dt, dst, src, i.InputInt##width(1)); \
} else { \
UseScratchRegisterScope temps(tasm()); \
UseScratchRegisterScope temps(masm()); \
Simd128Register tmp = temps.AcquireQ(); \
Register shift = temps.Acquire(); \
constexpr int mask = (1 << width) - 1; \
@ -493,7 +493,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
if (instr->InputAt(1)->IsImmediate()) { \
__ asm_imm(dt, dst, src, i.InputInt##width(1)); \
} else { \
UseScratchRegisterScope temps(tasm()); \
UseScratchRegisterScope temps(masm()); \
Simd128Register tmp = temps.AcquireQ(); \
Register shift = temps.Acquire(); \
constexpr int mask = (1 << width) - 1; \
@ -518,20 +518,20 @@ void CodeGenerator::AssemblePrepareTailCall() {
namespace {
void FlushPendingPushRegisters(TurboAssembler* tasm,
void FlushPendingPushRegisters(MacroAssembler* masm,
FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes) {
switch (pending_pushes->size()) {
case 0:
break;
case 1:
tasm->push((*pending_pushes)[0]);
masm->push((*pending_pushes)[0]);
break;
case 2:
tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
break;
case 3:
tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
(*pending_pushes)[2]);
break;
default:
@ -542,7 +542,7 @@ void FlushPendingPushRegisters(TurboAssembler* tasm,
}
void AdjustStackPointerForTailCall(
TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
ZoneVector<Register>* pending_pushes = nullptr,
bool allow_shrinkage = true) {
int current_sp_offset = state->GetSPToFPSlotCount() +
@ -550,15 +550,15 @@ void AdjustStackPointerForTailCall(
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
if (pending_pushes != nullptr) {
FlushPendingPushRegisters(tasm, state, pending_pushes);
FlushPendingPushRegisters(masm, state, pending_pushes);
}
tasm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize);
masm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
if (pending_pushes != nullptr) {
FlushPendingPushRegisters(tasm, state, pending_pushes);
FlushPendingPushRegisters(masm, state, pending_pushes);
}
tasm->add(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
masm->add(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
}
}
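A worked numeric sketch of the delta logic above (illustrative values only): the adjustment is the gap between where the callee expects its first unused stack slot and where the current frame state says the stack pointer sits; positive deltas allocate space, negative ones shrink the stack.

#include <cassert>

int StackSlotDelta(int new_slot_above_sp, int sp_to_fp_slots,
                   int fixed_slots_above_fp) {
  // Mirrors the computation above; the second summand is cut off in the diff,
  // so its name here is a stand-in.
  int current_sp_offset = sp_to_fp_slots + fixed_slots_above_fp;
  return new_slot_above_sp - current_sp_offset;
}

int main() {
  assert(StackSlotDelta(6, 3, 1) == 2);   // grow: AllocateStackSpace(2 slots)
  assert(StackSlotDelta(2, 3, 1) == -2);  // shrink: add sp, sp, #(2 slots)
}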
@ -601,7 +601,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
AdjustStackPointerForTailCall(
tasm(), frame_access_state(),
masm(), frame_access_state(),
destination_location.index() - pending_pushes.size(),
&pending_pushes);
// Pushes of non-register data types are not supported.
@ -611,26 +611,26 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
// TODO(arm): We can push more than 3 registers at once. Add support in
// the macro-assembler for pushing a list of registers.
if (pending_pushes.size() == 3) {
FlushPendingPushRegisters(tasm(), frame_access_state(),
FlushPendingPushRegisters(masm(), frame_access_state(),
&pending_pushes);
}
move->Eliminate();
}
FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
}
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset, nullptr, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_slot_offset) {
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset);
}
// Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ ComputeCodeStartAddress(scratch);
__ cmp(scratch, kJavaScriptCallCodeStartRegister);
@ -645,7 +645,7 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
__ ldr(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
@ -747,7 +747,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallJSFunction: {
Register func = i.InputRegister(0);
if (v8_flags.debug_code) {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
// Check the function's context matches the context argument.
__ ldr(scratch, FieldMemOperand(func, JSFunction::kContextOffset));
@ -858,7 +858,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
__ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
@ -1069,7 +1069,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister(2), i.OutputSBit());
break;
case kArmMls: {
CpuFeatureScope scope(tasm(), ARMv7);
CpuFeatureScope scope(masm(), ARMv7);
__ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
i.InputRegister(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@ -1093,13 +1093,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister(1), i.OutputSBit());
break;
case kArmSdiv: {
CpuFeatureScope scope(tasm(), SUDIV);
CpuFeatureScope scope(masm(), SUDIV);
__ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmUdiv: {
CpuFeatureScope scope(tasm(), SUDIV);
CpuFeatureScope scope(masm(), SUDIV);
__ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
@ -1127,20 +1127,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.OutputSBit());
break;
case kArmBfc: {
CpuFeatureScope scope(tasm(), ARMv7);
CpuFeatureScope scope(masm(), ARMv7);
__ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmUbfx: {
CpuFeatureScope scope(tasm(), ARMv7);
CpuFeatureScope scope(masm(), ARMv7);
__ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
i.InputInt8(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmSbfx: {
CpuFeatureScope scope(tasm(), ARMv7);
CpuFeatureScope scope(masm(), ARMv7);
__ sbfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
i.InputInt8(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@ -1183,7 +1183,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmRbit: {
CpuFeatureScope scope(tasm(), ARMv7);
CpuFeatureScope scope(masm(), ARMv7);
__ rbit(i.OutputRegister(), i.InputRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
@ -1378,7 +1378,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmVmodF64: {
// TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead.
FrameScope scope(tasm(), StackFrame::MANUAL);
FrameScope scope(masm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2);
__ MovToFloatParameters(i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
@ -1398,7 +1398,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVrintmF32: {
CpuFeatureScope scope(tasm(), ARMv8);
CpuFeatureScope scope(masm(), ARMv8);
if (instr->InputAt(0)->IsSimd128Register()) {
__ vrintm(NeonS32, i.OutputSimd128Register(),
i.InputSimd128Register(0));
@ -1408,12 +1408,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVrintmF64: {
CpuFeatureScope scope(tasm(), ARMv8);
CpuFeatureScope scope(masm(), ARMv8);
__ vrintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kArmVrintpF32: {
CpuFeatureScope scope(tasm(), ARMv8);
CpuFeatureScope scope(masm(), ARMv8);
if (instr->InputAt(0)->IsSimd128Register()) {
__ vrintp(NeonS32, i.OutputSimd128Register(),
i.InputSimd128Register(0));
@ -1423,12 +1423,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVrintpF64: {
CpuFeatureScope scope(tasm(), ARMv8);
CpuFeatureScope scope(masm(), ARMv8);
__ vrintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kArmVrintzF32: {
CpuFeatureScope scope(tasm(), ARMv8);
CpuFeatureScope scope(masm(), ARMv8);
if (instr->InputAt(0)->IsSimd128Register()) {
__ vrintz(NeonS32, i.OutputSimd128Register(),
i.InputSimd128Register(0));
@ -1438,17 +1438,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVrintzF64: {
CpuFeatureScope scope(tasm(), ARMv8);
CpuFeatureScope scope(masm(), ARMv8);
__ vrintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kArmVrintaF64: {
CpuFeatureScope scope(tasm(), ARMv8);
CpuFeatureScope scope(masm(), ARMv8);
__ vrinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kArmVrintnF32: {
CpuFeatureScope scope(tasm(), ARMv8);
CpuFeatureScope scope(masm(), ARMv8);
if (instr->InputAt(0)->IsSimd128Register()) {
__ vrintn(NeonS32, i.OutputSimd128Register(),
i.InputSimd128Register(0));
@ -1458,7 +1458,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVrintnF64: {
CpuFeatureScope scope(tasm(), ARMv8);
CpuFeatureScope scope(masm(), ARMv8);
__ vrintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
@ -1473,7 +1473,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtF32S32: {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS();
__ vmov(scratch, i.InputRegister(0));
__ vcvt_f32_s32(i.OutputFloatRegister(), scratch);
@ -1481,7 +1481,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtF32U32: {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS();
__ vmov(scratch, i.InputRegister(0));
__ vcvt_f32_u32(i.OutputFloatRegister(), scratch);
@ -1489,7 +1489,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtF64S32: {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS();
__ vmov(scratch, i.InputRegister(0));
__ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
@ -1497,7 +1497,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtF64U32: {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS();
__ vmov(scratch, i.InputRegister(0));
__ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
@ -1505,7 +1505,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtS32F32: {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS();
__ vcvt_s32_f32(scratch, i.InputFloatRegister(0));
__ vmov(i.OutputRegister(), scratch);
@ -1520,7 +1520,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtU32F32: {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS();
__ vcvt_u32_f32(scratch, i.InputFloatRegister(0));
__ vmov(i.OutputRegister(), scratch);
@ -1535,7 +1535,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtS32F64: {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS();
__ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
__ vmov(i.OutputRegister(), scratch);
@ -1543,7 +1543,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtU32F64: {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS();
__ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
__ vmov(i.OutputRegister(), scratch);
@ -1762,7 +1762,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vldr(i.OutputFloatRegister(), MemOperand(fp, offset));
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ add(scratch, fp, Operand(offset));
__ vld1(Neon8, NeonListOperand(i.OutputSimd128Register()),
@ -1899,7 +1899,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
#undef ASSEMBLE_F64X2_ARITHMETIC_BINOP
case kArmF64x2Eq: {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ mov(scratch, Operand(0));
__ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(),
@ -1915,7 +1915,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmF64x2Ne: {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ mov(scratch, Operand(0));
__ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(),
@ -1931,7 +1931,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmF64x2Lt: {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(),
i.InputSimd128Register(1).low());
@ -1947,7 +1947,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmF64x2Le: {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(),
i.InputSimd128Register(1).low());
@ -1989,7 +1989,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmF64x2Ceil: {
CpuFeatureScope scope(tasm(), ARMv8);
CpuFeatureScope scope(masm(), ARMv8);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ vrintp(dst.low(), src.low());
@ -1997,7 +1997,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmF64x2Floor: {
CpuFeatureScope scope(tasm(), ARMv8);
CpuFeatureScope scope(masm(), ARMv8);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ vrintm(dst.low(), src.low());
@ -2005,7 +2005,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmF64x2Trunc: {
CpuFeatureScope scope(tasm(), ARMv8);
CpuFeatureScope scope(masm(), ARMv8);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ vrintz(dst.low(), src.low());
@ -2013,7 +2013,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmF64x2NearestInt: {
CpuFeatureScope scope(tasm(), ARMv8);
CpuFeatureScope scope(masm(), ARMv8);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ vrintn(dst.low(), src.low());
@ -2060,7 +2060,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI64x2Mul: {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
QwNeonRegister dst = i.OutputSimd128Register();
QwNeonRegister left = i.InputSimd128Register(0);
QwNeonRegister right = i.InputSimd128Register(1);
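// A standalone model of the lane arithmetic this case emits (sketch only,
// not V8 code): modulo 2^64, (aH*2^32 + aL) * (bH*2^32 + bL) reduces to
// ((aH*bL + aL*bH) << 32) + aL*bL, so the aH*bH partial product can be
// dropped and a 64-bit lane multiply built entirely from 32-bit multiplies:
//   uint64_t Mul64ViaHalves(uint64_t a, uint64_t b) {
//     uint64_t a_lo = uint32_t(a), a_hi = a >> 32;
//     uint64_t b_lo = uint32_t(b), b_hi = b >> 32;
//     uint64_t cross = a_hi * b_lo + a_lo * b_hi;  // overflow discarded
//     return (cross << 32) + a_lo * b_lo;
//   }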
@ -2447,7 +2447,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI32x4BitMask: {
Register dst = i.OutputRegister();
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Simd128Register src = i.InputSimd128Register(0);
Simd128Register tmp = temps.AcquireQ();
Simd128Register mask = i.TempSimd128Register(0);
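// What BitMask computes, modeled over plain arrays (illustrative sketch,
// not V8 code): bit N of the scalar result is the sign bit of lane N, so
// four i32 lanes produce a 4-bit mask.
//   uint32_t I32x4BitMask(const int32_t lanes[4]) {
//     uint32_t mask = 0;
//     for (int n = 0; n < 4; ++n) {
//       mask |= (uint32_t(lanes[n]) >> 31) << n;  // sign bit -> bit n
//     }
//     return mask;
//   }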
@ -2468,7 +2468,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register lhs = i.InputSimd128Register(0);
Simd128Register rhs = i.InputSimd128Register(1);
Simd128Register tmp1 = i.TempSimd128Register(0);
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
__ vmull(NeonS16, tmp1, lhs.low(), rhs.low());
__ vmull(NeonS16, scratch, lhs.high(), rhs.high());
@ -2650,7 +2650,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI16x8BitMask: {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register dst = i.OutputRegister();
Simd128Register src = i.InputSimd128Register(0);
Simd128Register tmp = temps.AcquireQ();
@ -2805,7 +2805,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI8x16BitMask: {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register dst = i.OutputRegister();
Simd128Register src = i.InputSimd128Register(0);
Simd128Register tmp = temps.AcquireQ();
@ -2906,7 +2906,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
DCHECK(dst == i.InputSimd128Register(0));
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
// src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7]
__ vmov(scratch, src1);
@ -2917,7 +2917,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
DCHECK(dst == i.InputSimd128Register(0));
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
// src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from UnzipLeft).
__ vmov(scratch, src1);
@ -2928,7 +2928,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
DCHECK(dst == i.InputSimd128Register(0));
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
// src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7]
__ vmov(scratch, src1);
@ -2961,7 +2961,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS32x4TransposeRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from TransposeLeft).
@ -2990,7 +2990,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS16x8UnzipLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15]
@ -3001,7 +3001,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS16x8UnzipRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped).
@ -3012,7 +3012,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS16x8TransposeLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15]
@ -3023,7 +3023,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS16x8TransposeRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped).
@ -3052,7 +3052,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS8x16UnzipLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
@ -3063,7 +3063,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS8x16UnzipRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
@ -3074,7 +3074,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS8x16TransposeLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
@ -3085,7 +3085,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS8x16TransposeRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
@ -3112,7 +3112,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
DwVfpRegister table_base = src0.low();
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
// If unary shuffle, table is src0 (2 d-registers), otherwise src0 and
// src1. They must be consecutive.
@ -3163,7 +3163,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmV128AnyTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
DwVfpRegister scratch = temps.AcquireD();
__ vpmax(NeonU32, scratch, src.low(), src.high());
__ vpmax(NeonU32, scratch, scratch, scratch);
@ -3178,7 +3178,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI32x4AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
DwVfpRegister scratch = temps.AcquireD();
__ vpmin(NeonU32, scratch, src.low(), src.high());
__ vpmin(NeonU32, scratch, scratch, scratch);
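// Why the pairwise-min chain works (sketch, not V8 code): all lanes are
// nonzero exactly when the minimum over all lanes is nonzero, and two vpmin
// steps fold four u32 lanes down to one. (V128AnyTrue above is the dual,
// folding with pairwise max instead.)
//   bool I32x4AllTrue(const uint32_t lanes[4]) {
//     uint32_t m = std::min(std::min(lanes[0], lanes[1]),
//                           std::min(lanes[2], lanes[3]));
//     return m != 0;
//   }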
@ -3189,7 +3189,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI16x8AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
DwVfpRegister scratch = temps.AcquireD();
__ vpmin(NeonU16, scratch, src.low(), src.high());
__ vpmin(NeonU16, scratch, scratch, scratch);
@ -3201,7 +3201,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI8x16AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
DwVfpRegister scratch = temps.AcquireD();
__ vpmin(NeonU8, scratch, src.low(), src.high());
__ vpmin(NeonU8, scratch, scratch, scratch);
@ -3747,7 +3747,7 @@ void CodeGenerator::AssembleConstructFrame() {
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ ldr(scratch, FieldMemOperand(
kWasmInstanceRegister,
@ -3873,8 +3873,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
__ cmp(argc_reg, Operand(parameter_slots));
__ mov(argc_reg, Operand(parameter_slots), LeaveCC, lt);
}
__ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
__ DropArguments(argc_reg, MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
} else if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type());
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
@ -3944,7 +3944,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else if (source->IsDoubleRegister()) {
__ vstr(g.ToDoubleRegister(source), dst);
} else {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register temp = temps.Acquire();
QwNeonRegister src = g.ToSimd128Register(source);
__ add(temp, dst.rn(), Operand(dst.offset()));
@ -3965,7 +3965,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else if (source->IsDoubleStackSlot()) {
__ vldr(g.ToDoubleRegister(destination), src);
} else {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register temp = temps.Acquire();
QwNeonRegister dst = g.ToSimd128Register(destination);
__ add(temp, src.rn(), Operand(src.offset()));
@ -3976,7 +3976,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
case MoveType::kStackToStack: {
MemOperand src = g.ToMemOperand(source);
MemOperand dst = g.ToMemOperand(destination);
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
if (source->IsStackSlot() || source->IsFloatStackSlot()) {
SwVfpRegister temp = temps.AcquireS();
__ vldr(temp, src);
@ -4014,27 +4014,27 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
Constant src = g.ToConstant(source);
MemOperand dst = g.ToMemOperand(destination);
if (destination->IsStackSlot()) {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
// Acquire a S register instead of a general purpose register in case
// `vstr` needs one to compute the address of `dst`.
SwVfpRegister s_temp = temps.AcquireS();
{
// TODO(arm): This sequence could be optimized further if necessary by
// writing the constant directly into `s_temp`.
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register temp = temps.Acquire();
MoveConstantToRegister(temp, src);
__ vmov(s_temp, temp);
}
__ vstr(s_temp, dst);
} else if (destination->IsFloatStackSlot()) {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
SwVfpRegister temp = temps.AcquireS();
__ vmov(temp, Float32::FromBits(src.ToFloat32AsInt()));
__ vstr(temp, dst);
} else {
DCHECK(destination->IsDoubleStackSlot());
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
DwVfpRegister temp = temps.AcquireD();
// TODO(arm): Look into optimizing this further if possible. Supporting
// the NEON version of VMOV may help.
@ -4060,7 +4060,7 @@ AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
__ push(g.ToRegister(source));
frame_access_state()->IncreaseSPDelta(new_slots);
} else if (source->IsStackSlot()) {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ ldr(scratch, g.ToMemOperand(source));
__ push(scratch);
@ -4083,7 +4083,7 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
if (dest->IsRegister()) {
__ pop(g.ToRegister(dest));
} else if (dest->IsStackSlot()) {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ pop(scratch);
__ str(scratch, g.ToMemOperand(dest));
@ -4110,7 +4110,7 @@ void CodeGenerator::PopTempStackSlots() {
void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
MachineRepresentation rep) {
// Must be kept in sync with {MoveTempLocationTo}.
move_cycle_.temps.emplace(tasm());
move_cycle_.temps.emplace(masm());
auto& temps = *move_cycle_.temps;
// Temporarily exclude the reserved scratch registers while we pick a
// location to resolve the cycle. Re-include them immediately afterwards so
@ -4184,7 +4184,7 @@ void CodeGenerator::SetPendingMove(MoveOperands* move) {
InstructionOperand& destination = move->destination();
MoveType::Type move_type =
MoveType::InferMove(&move->source(), &move->destination());
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
if (move_type == MoveType::kStackToStack) {
if (source.IsStackSlot() || source.IsFloatStackSlot()) {
SwVfpRegister temp = temps.AcquireS();
@ -4224,7 +4224,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
DCHECK(destination->IsFloatRegister());
// GapResolver may give us reg codes that don't map to actual
// s-registers. Generate code to work around those cases.
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
LowDwVfpRegister temp = temps.AcquireLowD();
int src_code = LocationOperand::cast(source)->register_code();
int dst_code = LocationOperand::cast(destination)->register_code();
@ -4241,20 +4241,20 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
MemOperand dst = g.ToMemOperand(destination);
if (source->IsRegister()) {
Register src = g.ToRegister(source);
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
SwVfpRegister temp = temps.AcquireS();
__ vmov(temp, src);
__ ldr(src, dst);
__ vstr(temp, dst);
} else if (source->IsFloatRegister()) {
int src_code = LocationOperand::cast(source)->register_code();
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
LowDwVfpRegister temp = temps.AcquireLowD();
__ VmovExtended(temp.low().code(), src_code);
__ VmovExtended(src_code, dst);
__ vstr(temp.low(), dst);
} else if (source->IsDoubleRegister()) {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
DwVfpRegister temp = temps.AcquireD();
DwVfpRegister src = g.ToDoubleRegister(source);
__ Move(temp, src);
@ -4262,7 +4262,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ vstr(temp, dst);
} else {
QwNeonRegister src = g.ToSimd128Register(source);
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register temp = temps.Acquire();
QwNeonRegister temp_q = temps.AcquireQ();
__ Move(temp_q, src);
@ -4276,7 +4276,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
MemOperand src = g.ToMemOperand(source);
MemOperand dst = g.ToMemOperand(destination);
if (source->IsStackSlot() || source->IsFloatStackSlot()) {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
SwVfpRegister temp_0 = temps.AcquireS();
SwVfpRegister temp_1 = temps.AcquireS();
__ vldr(temp_0, dst);
@ -4284,7 +4284,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ vstr(temp_0, src);
__ vstr(temp_1, dst);
} else if (source->IsDoubleStackSlot()) {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
LowDwVfpRegister temp = temps.AcquireLowD();
if (temps.CanAcquireD()) {
DwVfpRegister temp_0 = temp;
@ -4317,7 +4317,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
MemOperand dst0 = dst;
MemOperand src1(src.rn(), src.offset() + kDoubleSize);
MemOperand dst1(dst.rn(), dst.offset() + kDoubleSize);
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
DwVfpRegister temp_0 = temps.AcquireD();
DwVfpRegister temp_1 = temps.AcquireD();
__ vldr(temp_0, dst0);


@ -397,7 +397,7 @@ void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
if (int_matcher.HasResolvedValue()) {
ptrdiff_t const delta =
int_matcher.ResolvedValue() +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
MacroAssemblerBase::RootRegisterOffsetForExternalReference(
selector->isolate(), m.ResolvedValue());
input_count = 1;
inputs[0] = g.UseImmediate(static_cast<int32_t>(delta));
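// Shape of the addressing rewrite above (sketch; the bias handling inside
// RootRegisterOffsetForExternalReference is assumed, not spelled out here):
// instead of materializing a full external address, the load is expressed
// relative to the root register,
//   address = kRootRegister + delta,
//   delta   = (external_address - isolate_root) + constant_index,
// which needs only a single immediate input as long as delta fits in 32 bits.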
@ -753,7 +753,7 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
if (int_matcher.HasResolvedValue()) {
ptrdiff_t const delta =
int_matcher.ResolvedValue() +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
MacroAssemblerBase::RootRegisterOffsetForExternalReference(
selector->isolate(), m.ResolvedValue());
int input_count = 2;
InstructionOperand inputs[2];


@ -24,7 +24,7 @@ namespace v8 {
namespace internal {
namespace compiler {
#define __ tasm()->
#define __ masm()->
// Adds Arm64-specific methods to convert InstructionOperands.
class Arm64OperandConverter final : public InstructionOperandConverter {
@ -238,13 +238,13 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
UNREACHABLE();
}
MemOperand ToMemOperand(InstructionOperand* op, TurboAssembler* tasm) const {
MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToMemOperand(AllocatedOperand::cast(op)->index(), tasm);
return SlotToMemOperand(AllocatedOperand::cast(op)->index(), masm);
}
MemOperand SlotToMemOperand(int slot, TurboAssembler* tasm) const {
MemOperand SlotToMemOperand(int slot, MacroAssembler* masm) const {
FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
if (offset.from_frame_pointer()) {
int from_sp = offset.offset() + frame_access_state()->GetSPToFPOffset();
@ -284,7 +284,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
void Generate() final {
if (COMPRESS_POINTERS_BOOL) {
__ DecompressTaggedPointer(value_, value_);
__ DecompressTagged(value_, value_);
}
__ CheckPageFlag(
value_, MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
@ -294,7 +294,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
: SaveFPRegsMode::kIgnore;
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
__ Push<TurboAssembler::kSignLR>(lr, padreg);
__ Push<MacroAssembler::kSignLR>(lr, padreg);
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(), sp);
}
if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
@ -311,7 +311,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CallRecordWriteStubSaveRegisters(object_, offset_, save_fp_mode);
}
if (must_save_lr_) {
__ Pop<TurboAssembler::kAuthLR>(padreg, lr);
__ Pop<MacroAssembler::kAuthLR>(padreg, lr);
unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
}
}
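// Context for the kSignLR/kAuthLR markers above (assumed from AArch64
// pointer authentication; the instruction choice is illustrative): when the
// frame was elided, lr is live and must be spilled around the stub call, and
// with PAC enabled the return address is signed before the store and
// authenticated after the reload, roughly
//   Push<kSignLR>: pacibsp; stp lr, padreg, [sp, #-16]!
//   Pop<kAuthLR>:  ldp padreg, lr, [sp], #16; autibsp
// so a corrupted spill slot faults instead of hijacking the return.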
@ -459,14 +459,14 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
// Handles unary ops that work for float (scalar), double (scalar), or NEON.
template <typename Fn>
void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
void EmitFpOrNeonUnop(MacroAssembler* masm, Fn fn, Instruction* instr,
Arm64OperandConverter i, VectorFormat scalar,
VectorFormat vector) {
VectorFormat f = instr->InputAt(0)->IsSimd128Register() ? vector : scalar;
VRegister output = VRegister::Create(i.OutputDoubleRegister().code(), f);
VRegister input = VRegister::Create(i.InputDoubleRegister(0).code(), f);
(tasm->*fn)(output, input);
(masm->*fn)(output, input);
}
} // namespace
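// How the Fn template parameter is consumed (sketch): fn is a
// pointer-to-member of MacroAssembler, so `(masm->*fn)(output, input)`
// dispatches to the concrete emitter chosen at the call site. A minimal
// standalone model of the same mechanism:
//   struct Masm { void Frintm() {} void Frintp() {} };
//   template <typename Fn> void Emit(Masm* m, Fn fn) { (m->*fn)(); }
//   // Usage: Emit(&masm, &Masm::Frintm);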
@ -539,13 +539,13 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
FrameScope scope(tasm(), StackFrame::MANUAL); \
FrameScope scope(masm(), StackFrame::MANUAL); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
} while (0)
#define ASSEMBLE_IEEE754_UNOP(name) \
do { \
FrameScope scope(tasm(), StackFrame::MANUAL); \
FrameScope scope(masm(), StackFrame::MANUAL); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
} while (0)
@ -558,7 +558,7 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
__ asm_imm(i.OutputSimd128Register().format(), \
i.InputSimd128Register(0).format(), i.InputInt##width(1)); \
} else { \
UseScratchRegisterScope temps(tasm()); \
UseScratchRegisterScope temps(masm()); \
VRegister tmp = temps.AcquireQ(); \
Register shift = temps.Acquire##gp(); \
constexpr int mask = (1 << width) - 1; \
@ -578,7 +578,7 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
__ asm_imm(i.OutputSimd128Register().format(), \
i.InputSimd128Register(0).format(), i.InputInt##width(1)); \
} else { \
UseScratchRegisterScope temps(tasm()); \
UseScratchRegisterScope temps(masm()); \
VRegister tmp = temps.AcquireQ(); \
Register shift = temps.Acquire##gp(); \
constexpr int mask = (1 << width) - 1; \
@ -592,7 +592,7 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
void CodeGenerator::AssembleDeconstructFrame() {
__ Mov(sp, fp);
__ Pop<TurboAssembler::kAuthLR>(fp, lr);
__ Pop<MacroAssembler::kAuthLR>(fp, lr);
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
}
@ -606,7 +606,7 @@ void CodeGenerator::AssemblePrepareTailCall() {
namespace {
void AdjustStackPointerForTailCall(TurboAssembler* tasm,
void AdjustStackPointerForTailCall(MacroAssembler* masm,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
@ -615,10 +615,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
DCHECK_EQ(stack_slot_delta % 2, 0);
if (stack_slot_delta > 0) {
tasm->Claim(stack_slot_delta);
masm->Claim(stack_slot_delta);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
tasm->Drop(-stack_slot_delta);
masm->Drop(-stack_slot_delta);
state->IncreaseSPDelta(stack_slot_delta);
}
}
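// A pure model of the adjustment above (sketch only): the delta between the
// desired and current slot count above sp decides whether the frame grows
// (Claim) or shrinks (Drop), and the SP delta is tracked either way.
//   int delta = new_slot_above_sp - current_sp_offset;
//   if (delta > 0) { claim(delta); sp_delta += delta; }
//   else if (allow_shrinkage && delta < 0) { drop(-delta); sp_delta += delta; }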
@ -627,14 +627,14 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
int first_unused_slot_offset) {
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_slot_offset) {
DCHECK_EQ(first_unused_slot_offset % 2, 0);
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset);
DCHECK(instr->IsTailCall());
InstructionOperandConverter g(this, instr);
@ -646,7 +646,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
// Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ ComputeCodeStartAddress(scratch);
__ cmp(scratch, kJavaScriptCallCodeStartRegister);
@ -705,7 +705,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Jump(wasm_code, constant.rmode());
} else {
Register target = i.InputRegister(0);
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
temps.Exclude(x17);
__ Mov(x17, target);
__ Jump(x17);
@ -737,7 +737,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
temps.Exclude(x17);
__ Mov(x17, reg);
__ Jump(x17);
@ -750,16 +750,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
if (v8_flags.debug_code) {
// Check the function's context matches the context argument.
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
Register temp = scope.AcquireX();
__ LoadTaggedPointerField(
temp, FieldMemOperand(func, JSFunction::kContextOffset));
__ LoadTaggedField(temp,
FieldMemOperand(func, JSFunction::kContextOffset));
__ cmp(cp, temp);
__ Assert(eq, AbortReason::kWrongFunctionContext);
}
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ LoadTaggedPointerField(x2,
FieldMemOperand(func, JSFunction::kCodeOffset));
__ LoadTaggedField(x2, FieldMemOperand(func, JSFunction::kCodeOffset));
__ CallCodeObject(x2);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@ -860,7 +859,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
__ Call(BUILTIN_CODE(isolate(), AbortCSADcheck),
RelocInfo::CODE_TARGET);
}
@ -1051,39 +1050,39 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(tanh);
break;
case kArm64Float32RoundDown:
EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintm, instr, i, kFormatS,
EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintm, instr, i, kFormatS,
kFormat4S);
break;
case kArm64Float64RoundDown:
EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintm, instr, i, kFormatD,
EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintm, instr, i, kFormatD,
kFormat2D);
break;
case kArm64Float32RoundUp:
EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintp, instr, i, kFormatS,
EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintp, instr, i, kFormatS,
kFormat4S);
break;
case kArm64Float64RoundUp:
EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintp, instr, i, kFormatD,
EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintp, instr, i, kFormatD,
kFormat2D);
break;
case kArm64Float64RoundTiesAway:
EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frinta, instr, i, kFormatD,
EmitFpOrNeonUnop(masm(), &MacroAssembler::Frinta, instr, i, kFormatD,
kFormat2D);
break;
case kArm64Float32RoundTruncate:
EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintz, instr, i, kFormatS,
EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintz, instr, i, kFormatS,
kFormat4S);
break;
case kArm64Float64RoundTruncate:
EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintz, instr, i, kFormatD,
EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintz, instr, i, kFormatD,
kFormat2D);
break;
case kArm64Float32RoundTiesEven:
EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintn, instr, i, kFormatS,
EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintn, instr, i, kFormatS,
kFormat4S);
break;
case kArm64Float64RoundTiesEven:
EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintn, instr, i, kFormatD,
EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintn, instr, i, kFormatD,
kFormat2D);
break;
case kArm64Add:
@ -1314,14 +1313,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
break;
case kArm64Imod: {
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
Register temp = scope.AcquireX();
__ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
__ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
break;
}
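// The Sdiv/Msub pair above is the standard remainder-from-division
// expansion: Msub(out, temp, rhs, lhs) computes out = lhs - temp * rhs, and
// with temp = lhs / rhs (truncating Sdiv) that is exactly the C-style
// lhs % rhs. Scalar model (ignoring rhs == 0, where AArch64 Sdiv yields 0
// while C++ division is undefined):
//   int64_t Imod(int64_t lhs, int64_t rhs) {
//     int64_t q = lhs / rhs;   // Sdiv
//     return lhs - q * rhs;    // Msub
//   }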
case kArm64Imod32: {
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
Register temp = scope.AcquireW();
__ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
__ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
@ -1329,14 +1328,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64Umod: {
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
Register temp = scope.AcquireX();
__ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
__ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
break;
}
case kArm64Umod32: {
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
Register temp = scope.AcquireW();
__ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
__ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
@ -1650,7 +1649,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArm64Float64Mod: {
// TODO(turbofan): implement directly.
FrameScope scope(tasm(), StackFrame::MANUAL);
FrameScope scope(masm(), StackFrame::MANUAL);
DCHECK_EQ(d0, i.InputDoubleRegister(0));
DCHECK_EQ(d1, i.InputDoubleRegister(1));
DCHECK_EQ(d0, i.OutputDoubleRegister());
@ -1890,23 +1889,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64LdrDecompressTaggedSigned:
__ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
break;
case kArm64LdrDecompressTaggedPointer:
__ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
break;
case kArm64LdrDecompressAnyTagged:
__ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
case kArm64LdrDecompressTagged:
__ DecompressTagged(i.OutputRegister(), i.MemoryOperand());
break;
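// What the merged Decompress* cases do under pointer compression (sketch;
// assumes V8's cage-base scheme): a tagged field holds only the low 32 bits
// of the pointer, and decompression zero-extends and adds the cage base back:
//   uintptr_t DecompressTagged(uintptr_t cage_base, uint32_t compressed) {
//     return cage_base + compressed;
//   }
// With one decompression path for heap pointers and Smis alike, the former
// TaggedPointer/AnyTagged split collapses into a single opcode.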
case kArm64LdarDecompressTaggedSigned:
__ AtomicDecompressTaggedSigned(i.OutputRegister(), i.InputRegister(0),
i.InputRegister(1), i.TempRegister(0));
break;
case kArm64LdarDecompressTaggedPointer:
__ AtomicDecompressTaggedPointer(i.OutputRegister(), i.InputRegister(0),
i.InputRegister(1), i.TempRegister(0));
break;
case kArm64LdarDecompressAnyTagged:
__ AtomicDecompressAnyTagged(i.OutputRegister(), i.InputRegister(0),
i.InputRegister(1), i.TempRegister(0));
case kArm64LdarDecompressTagged:
__ AtomicDecompressTagged(i.OutputRegister(), i.InputRegister(0),
i.InputRegister(1), i.TempRegister(0));
break;
case kArm64LdrDecodeSandboxedPointer:
__ LoadSandboxedPointerField(i.OutputRegister(), i.MemoryOperand());
@ -2369,7 +2361,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_LANE_SIZE_CASE(kArm64IAdd, Add);
SIMD_BINOP_LANE_SIZE_CASE(kArm64ISub, Sub);
case kArm64I64x2Mul: {
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
VRegister dst = i.OutputSimd128Register();
VRegister src1 = i.InputSimd128Register(0);
VRegister src2 = i.InputSimd128Register(1);
@ -2470,7 +2462,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_LANE_SIZE_CASE(kArm64IGtU, Cmhi);
SIMD_BINOP_LANE_SIZE_CASE(kArm64IGeU, Cmhs);
case kArm64I32x4BitMask: {
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
Register dst = i.OutputRegister32();
VRegister src = i.InputSimd128Register(0);
VRegister tmp = scope.AcquireQ();
@ -2486,7 +2478,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64I32x4DotI16x8S: {
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
VRegister lhs = i.InputSimd128Register(0);
VRegister rhs = i.InputSimd128Register(1);
VRegister tmp1 = scope.AcquireV(kFormat4S);
@ -2497,7 +2489,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64I16x8DotI8x16S: {
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
VRegister lhs = i.InputSimd128Register(0);
VRegister rhs = i.InputSimd128Register(1);
VRegister tmp1 = scope.AcquireV(kFormat8H);
@ -2515,7 +2507,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1).V16B());
} else {
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
VRegister lhs = i.InputSimd128Register(0);
VRegister rhs = i.InputSimd128Register(1);
VRegister tmp1 = scope.AcquireV(kFormat8H);
@ -2553,7 +2545,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
VRegister dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireV(kFormat4S);
if (dst == src1) {
__ Mov(temp, src1.V4S());
@ -2574,7 +2566,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
VRegister dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireV(kFormat4S);
if (dst == src1) {
__ Mov(temp, src1.V4S());
@ -2588,7 +2580,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_LANE_SIZE_CASE(kArm64ISubSatU, Uqsub);
SIMD_BINOP_CASE(kArm64I16x8Q15MulRSatS, Sqrdmulh, 8H);
case kArm64I16x8BitMask: {
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
Register dst = i.OutputRegister32();
VRegister src = i.InputSimd128Register(0);
VRegister tmp = scope.AcquireQ();
@ -2615,7 +2607,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
VRegister dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireV(kFormat8H);
if (dst == src1) {
__ Mov(temp, src1.V8H());
@ -2633,7 +2625,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
VRegister dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireV(kFormat8H);
if (dst == src1) {
__ Mov(temp, src1.V8H());
@ -2644,7 +2636,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64I8x16BitMask: {
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
Register dst = i.OutputRegister32();
VRegister src = i.InputSimd128Register(0);
VRegister tmp = scope.AcquireQ();
@ -2733,7 +2725,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
src1 = i.InputSimd128Register(1).V4S();
// Check for in-place shuffles.
// If dst == src0 == src1, then the shuffle is unary and we only use src0.
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireV(kFormat4S);
if (dst == src0) {
__ Mov(temp, src0);
@ -2799,7 +2791,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(0, (imm1 | imm2) & (src0 == src1 ? 0xF0F0F0F0F0F0F0F0
: 0xE0E0E0E0E0E0E0E0));
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireV(kFormat16B);
__ Movi(temp, imm2, imm1);
@ -2878,7 +2870,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64V128AnyTrue: {
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
// For AnyTrue, the format does not matter; also, we would like to avoid
// an expensive horizontal reduction.
VRegister temp = scope.AcquireV(kFormat4S);
@ -2891,7 +2883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
#define SIMD_REDUCE_OP_CASE(Op, Instr, format, FORMAT) \
case Op: { \
UseScratchRegisterScope scope(tasm()); \
UseScratchRegisterScope scope(masm()); \
VRegister temp = scope.AcquireV(format); \
__ Instr(temp, i.InputSimd128Register(0).V##FORMAT()); \
__ Umov(i.OutputRegister32(), temp, 0); \
@ -3045,7 +3037,7 @@ void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
Arm64OperandConverter i(this, instr);
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
Register input = i.InputRegister32(0);
Register temp = scope.AcquireX();
size_t const case_count = instr->InputCount() - 2;
@ -3066,7 +3058,7 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
{
const size_t instruction_count =
case_count * instructions_per_case + instructions_per_jump_target;
TurboAssembler::BlockPoolsScope block_pools(tasm(),
MacroAssembler::BlockPoolsScope block_pools(masm(),
instruction_count * kInstrSize);
__ Bind(&table);
for (size_t index = 0; index < case_count; ++index) {
@ -3125,10 +3117,10 @@ void CodeGenerator::AssembleConstructFrame() {
DCHECK_EQ(required_slots % 2, 1);
__ Prologue();
// Update required_slots count since we have just claimed one extra slot.
static_assert(TurboAssembler::kExtraSlotClaimedByPrologue == 1);
required_slots -= TurboAssembler::kExtraSlotClaimedByPrologue;
static_assert(MacroAssembler::kExtraSlotClaimedByPrologue == 1);
required_slots -= MacroAssembler::kExtraSlotClaimedByPrologue;
} else {
__ Push<TurboAssembler::kSignLR>(lr, fp);
__ Push<MacroAssembler::kSignLR>(lr, fp);
__ Mov(fp, sp);
}
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
@ -3151,7 +3143,7 @@ void CodeGenerator::AssembleConstructFrame() {
// One unoptimized frame slot has already been claimed when the actual
// arguments count was pushed.
required_slots -=
unoptimized_frame_slots - TurboAssembler::kExtraSlotClaimedByPrologue;
unoptimized_frame_slots - MacroAssembler::kExtraSlotClaimedByPrologue;
}
#if V8_ENABLE_WEBASSEMBLY
@ -3165,7 +3157,7 @@ void CodeGenerator::AssembleConstructFrame() {
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) {
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
Register scratch = scope.AcquireX();
__ Ldr(scratch, FieldMemOperand(
kWasmInstanceRegister,
@ -3178,7 +3170,7 @@ void CodeGenerator::AssembleConstructFrame() {
{
// Finish the frame that hasn't been fully built yet.
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ Mov(scratch,
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
@ -3209,7 +3201,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Claim(required_slots);
break;
case CallDescriptor::kCallCodeObject: {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ Mov(scratch,
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
@ -3225,7 +3217,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
#if V8_ENABLE_WEBASSEMBLY
case CallDescriptor::kCallWasmFunction: {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ Mov(scratch,
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
@ -3235,7 +3227,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
case CallDescriptor::kCallWasmImportWrapper:
case CallDescriptor::kCallWasmCapiFunction: {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ Mov(scratch,
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
@ -3254,7 +3246,7 @@ void CodeGenerator::AssembleConstructFrame() {
case CallDescriptor::kCallAddress:
#if V8_ENABLE_WEBASSEMBLY
if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ Mov(scratch, StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY));
__ Push(scratch, padreg);
@ -3392,7 +3384,7 @@ void CodeGenerator::PrepareForDeoptimizationExits(
}
// Emit the jumps to deoptimization entries.
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
Register scratch = scope.AcquireX();
static_assert(static_cast<int>(kFirstDeoptimizeKind) == 0);
for (int i = 0; i < kDeoptimizeKindCount; i++) {
@ -3417,9 +3409,9 @@ AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
__ Push(padreg, g.ToRegister(source));
frame_access_state()->IncreaseSPDelta(new_slots);
} else if (source->IsStackSlot()) {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ Ldr(scratch, g.ToMemOperand(source, tasm()));
__ Ldr(scratch, g.ToMemOperand(source, masm()));
__ Push(padreg, scratch);
frame_access_state()->IncreaseSPDelta(new_slots);
} else {
@ -3440,10 +3432,10 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
if (dest->IsRegister()) {
__ Pop(g.ToRegister(dest), padreg);
} else if (dest->IsStackSlot()) {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ Pop(scratch, padreg);
__ Str(scratch, g.ToMemOperand(dest, tasm()));
__ Str(scratch, g.ToMemOperand(dest, masm()));
} else {
int last_frame_slot_id =
frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
@ -3468,7 +3460,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
MachineRepresentation rep) {
// Must be kept in sync with {MoveTempLocationTo}.
DCHECK(!source->IsImmediate());
move_cycle_.temps.emplace(tasm());
move_cycle_.temps.emplace(masm());
auto& temps = *move_cycle_.temps;
// Temporarily exclude the reserved scratch registers while we pick one to
// resolve the move cycle. Re-include them immediately afterwards as they
@ -3506,7 +3498,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
scratch_reg.code());
Arm64OperandConverter g(this, nullptr);
if (source->IsStackSlot()) {
__ Ldr(g.ToDoubleRegister(&scratch), g.ToMemOperand(source, tasm()));
__ Ldr(g.ToDoubleRegister(&scratch), g.ToMemOperand(source, masm()));
} else {
DCHECK(source->IsRegister());
__ fmov(g.ToDoubleRegister(&scratch), g.ToRegister(source));
@ -3535,7 +3527,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
move_cycle_.scratch_reg->code());
Arm64OperandConverter g(this, nullptr);
if (dest->IsStackSlot()) {
__ Str(g.ToDoubleRegister(&scratch), g.ToMemOperand(dest, tasm()));
__ Str(g.ToDoubleRegister(&scratch), g.ToMemOperand(dest, masm()));
} else {
DCHECK(dest->IsRegister());
__ fmov(g.ToRegister(dest), g.ToDoubleRegister(&scratch));
@ -3557,9 +3549,9 @@ void CodeGenerator::SetPendingMove(MoveOperands* move) {
auto move_type = MoveType::InferMove(&move->source(), &move->destination());
if (move_type == MoveType::kStackToStack) {
Arm64OperandConverter g(this, nullptr);
MemOperand src = g.ToMemOperand(&move->source(), tasm());
MemOperand dst = g.ToMemOperand(&move->destination(), tasm());
UseScratchRegisterScope temps(tasm());
MemOperand src = g.ToMemOperand(&move->source(), masm());
MemOperand dst = g.ToMemOperand(&move->destination(), masm());
UseScratchRegisterScope temps(masm());
if (move->source().IsSimd128StackSlot()) {
VRegister temp = temps.AcquireQ();
move_cycle_.scratch_fp_regs.set(temp);
@ -3574,11 +3566,11 @@ void CodeGenerator::SetPendingMove(MoveOperands* move) {
// Offset doesn't fit into the immediate field so the assembler will emit
// two instructions and use a second temp register.
if ((src.IsImmediateOffset() &&
!tasm()->IsImmLSScaled(src_offset, src_size) &&
!tasm()->IsImmLSUnscaled(src_offset)) ||
!masm()->IsImmLSScaled(src_offset, src_size) &&
!masm()->IsImmLSUnscaled(src_offset)) ||
(dst.IsImmediateOffset() &&
!tasm()->IsImmLSScaled(dst_offset, dst_size) &&
!tasm()->IsImmLSUnscaled(dst_offset))) {
!masm()->IsImmLSScaled(dst_offset, dst_size) &&
!masm()->IsImmLSUnscaled(dst_offset))) {
Register temp = temps.AcquireX();
move_cycle_.scratch_regs.set(temp);
}
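// The immediate ranges being probed above, modeled from the AArch64
// load/store encodings (assumed, not quoted from the assembler): LDR/STR
// take an unsigned 12-bit offset scaled by the access size, LDUR/STUR a
// signed 9-bit unscaled offset.
//   bool FitsScaled(int64_t off, unsigned size_log2) {
//     return off >= 0 && (off & ((int64_t{1} << size_log2) - 1)) == 0 &&
//            (off >> size_log2) < (int64_t{1} << 12);
//   }
//   bool FitsUnscaled(int64_t off) { return off >= -256 && off <= 255; }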
@ -3627,7 +3619,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
return;
case MoveType::kRegisterToStack: {
MemOperand dst = g.ToMemOperand(destination, tasm());
MemOperand dst = g.ToMemOperand(destination, masm());
if (source->IsRegister()) {
__ Str(g.ToRegister(source), dst);
} else {
@ -3642,7 +3634,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
return;
}
case MoveType::kStackToRegister: {
MemOperand src = g.ToMemOperand(source, tasm());
MemOperand src = g.ToMemOperand(source, masm());
if (destination->IsRegister()) {
__ Ldr(g.ToRegister(destination), src);
} else {
@ -3657,15 +3649,15 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
return;
}
case MoveType::kStackToStack: {
MemOperand src = g.ToMemOperand(source, tasm());
MemOperand dst = g.ToMemOperand(destination, tasm());
MemOperand src = g.ToMemOperand(source, masm());
MemOperand dst = g.ToMemOperand(destination, masm());
if (source->IsSimd128StackSlot()) {
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireQ();
__ Ldr(temp, src);
__ Str(temp, dst);
} else {
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
Register temp = scope.AcquireX();
__ Ldr(temp, src);
__ Str(temp, dst);
@ -3689,9 +3681,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
case MoveType::kConstantToStack: {
Constant src = g.ToConstant(source);
MemOperand dst = g.ToMemOperand(destination, tasm());
MemOperand dst = g.ToMemOperand(destination, masm());
if (destination->IsStackSlot()) {
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
Register temp = scope.AcquireX();
MoveConstantToRegister(temp, src);
__ Str(temp, dst);
@ -3699,7 +3691,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (base::bit_cast<int32_t>(src.ToFloat32()) == 0) {
__ Str(wzr, dst);
} else {
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireS();
__ Fmov(temp, src.ToFloat32());
__ Str(temp, dst);
@ -3709,7 +3701,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (src.ToFloat64().AsUint64() == 0) {
__ Str(xzr, dst);
} else {
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireD();
__ Fmov(temp, src.ToFloat64().value());
__ Str(temp, dst);
@ -3740,8 +3732,8 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
return;
case MoveType::kRegisterToStack: {
UseScratchRegisterScope scope(tasm());
MemOperand dst = g.ToMemOperand(destination, tasm());
UseScratchRegisterScope scope(masm());
MemOperand dst = g.ToMemOperand(destination, masm());
if (source->IsRegister()) {
Register temp = scope.AcquireX();
Register src = g.ToRegister(source);
@ -3749,7 +3741,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ Ldr(src, dst);
__ Str(temp, dst);
} else {
UseScratchRegisterScope scope(tasm());
UseScratchRegisterScope scope(masm());
VRegister src = g.ToDoubleRegister(source);
if (source->IsFloatRegister() || source->IsDoubleRegister()) {
VRegister temp = scope.AcquireD();
@ -3767,9 +3759,9 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
return;
}
case MoveType::kStackToStack: {
UseScratchRegisterScope scope(tasm());
MemOperand src = g.ToMemOperand(source, tasm());
MemOperand dst = g.ToMemOperand(destination, tasm());
UseScratchRegisterScope scope(masm());
MemOperand src = g.ToMemOperand(source, masm());
MemOperand dst = g.ToMemOperand(destination, masm());
VRegister temp_0 = scope.AcquireD();
VRegister temp_1 = scope.AcquireD();
if (source->IsSimd128StackSlot()) {


@ -199,11 +199,9 @@ namespace compiler {
V(Arm64Float64MoveU64) \
V(Arm64U64MoveFloat64) \
V(Arm64LdrDecompressTaggedSigned) \
V(Arm64LdrDecompressTaggedPointer) \
V(Arm64LdrDecompressAnyTagged) \
V(Arm64LdrDecompressTagged) \
V(Arm64LdarDecompressTaggedSigned) \
V(Arm64LdarDecompressTaggedPointer) \
V(Arm64LdarDecompressAnyTagged) \
V(Arm64LdarDecompressTagged) \
V(Arm64StrCompressTagged) \
V(Arm64StlrCompressTagged) \
V(Arm64LdrDecodeSandboxedPointer) \


@ -315,11 +315,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64LdrW:
case kArm64Ldr:
case kArm64LdrDecompressTaggedSigned:
case kArm64LdrDecompressTaggedPointer:
case kArm64LdrDecompressAnyTagged:
case kArm64LdrDecompressTagged:
case kArm64LdarDecompressTaggedSigned:
case kArm64LdarDecompressTaggedPointer:
case kArm64LdarDecompressAnyTagged:
case kArm64LdarDecompressTagged:
case kArm64LdrDecodeSandboxedPointer:
case kArm64Peek:
case kArm64LoadSplat:
@ -431,8 +429,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return 1;
case kArm64LdrDecompressTaggedSigned:
case kArm64LdrDecompressTaggedPointer:
case kArm64LdrDecompressAnyTagged:
case kArm64LdrDecompressTagged:
case kArm64Ldr:
case kArm64LdrD:
case kArm64LdrS:


@ -623,7 +623,7 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
ptrdiff_t const delta =
g.GetIntegerConstantValue(index) +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
MacroAssemblerBase::RootRegisterOffsetForExternalReference(
selector->isolate(), m.ResolvedValue());
input_count = 1;
// Check that the delta is a 32-bit integer due to the limitations of
@ -843,11 +843,8 @@ void InstructionSelector::VisitLoad(Node* node) {
immediate_mode = kLoadStoreImm32;
break;
case MachineRepresentation::kTaggedPointer:
opcode = kArm64LdrDecompressTaggedPointer;
immediate_mode = kLoadStoreImm32;
break;
case MachineRepresentation::kTagged:
opcode = kArm64LdrDecompressAnyTagged;
opcode = kArm64LdrDecompressTagged;
immediate_mode = kLoadStoreImm32;
break;
#else
@ -988,7 +985,7 @@ void InstructionSelector::VisitStore(Node* node) {
CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
ptrdiff_t const delta =
g.GetIntegerConstantValue(index) +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
MacroAssemblerBase::RootRegisterOffsetForExternalReference(
isolate(), m.ResolvedValue());
if (is_int32(delta)) {
input_count = 2;
@ -2773,10 +2770,10 @@ void VisitAtomicLoad(InstructionSelector* selector, Node* node,
code = kArm64LdarDecompressTaggedSigned;
break;
case MachineRepresentation::kTaggedPointer:
code = kArm64LdarDecompressTaggedPointer;
code = kArm64LdarDecompressTagged;
break;
case MachineRepresentation::kTagged:
code = kArm64LdarDecompressAnyTagged;
code = kArm64LdarDecompressTagged;
break;
#else
case MachineRepresentation::kTaggedSigned: // Fall through.


@ -266,14 +266,14 @@ class OutOfLineCode : public ZoneObject {
Label* entry() { return &entry_; }
Label* exit() { return &exit_; }
const Frame* frame() const { return frame_; }
TurboAssembler* tasm() { return tasm_; }
MacroAssembler* masm() { return masm_; }
OutOfLineCode* next() const { return next_; }
private:
Label entry_;
Label exit_;
const Frame* const frame_;
TurboAssembler* const tasm_;
MacroAssembler* const masm_;
OutOfLineCode* const next_;
};


@ -64,7 +64,7 @@ CodeGenerator::CodeGenerator(
current_block_(RpoNumber::Invalid()),
start_source_position_(start_source_position),
current_source_position_(SourcePosition::Unknown()),
tasm_(isolate, options, CodeObjectRequired::kNo,
masm_(isolate, options, CodeObjectRequired::kNo,
#if V8_ENABLE_WEBASSEMBLY
buffer_cache ? buffer_cache->GetAssemblerBuffer(
AssemblerBase::kDefaultBufferSize)
@ -98,15 +98,15 @@ CodeGenerator::CodeGenerator(
}
CreateFrameAccessState(frame);
CHECK_EQ(info->is_osr(), osr_helper_.has_value());
tasm_.set_jump_optimization_info(jump_opt);
masm_.set_jump_optimization_info(jump_opt);
CodeKind code_kind = info->code_kind();
if (code_kind == CodeKind::WASM_FUNCTION ||
code_kind == CodeKind::WASM_TO_CAPI_FUNCTION ||
code_kind == CodeKind::WASM_TO_JS_FUNCTION ||
code_kind == CodeKind::JS_TO_WASM_FUNCTION) {
tasm_.set_abort_hard(true);
masm_.set_abort_hard(true);
}
tasm_.set_builtin(builtin);
masm_.set_builtin(builtin);
}
bool CodeGenerator::wasm_runtime_exception_support() const {
@ -173,19 +173,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
Label* jump_deoptimization_entry_label =
&jump_deoptimization_entry_labels_[static_cast<int>(deopt_kind)];
if (info()->source_positions()) {
tasm()->RecordDeoptReason(deoptimization_reason, exit->node_id(),
masm()->RecordDeoptReason(deoptimization_reason, exit->node_id(),
exit->pos(), deoptimization_id);
}
if (deopt_kind == DeoptimizeKind::kLazy) {
++lazy_deopt_count_;
tasm()->BindExceptionHandler(exit->label());
masm()->BindExceptionHandler(exit->label());
} else {
++eager_deopt_count_;
tasm()->bind(exit->label());
masm()->bind(exit->label());
}
Builtin target = Deoptimizer::GetDeoptimizationEntry(deopt_kind);
tasm()->CallForDeoptimization(target, deoptimization_id, exit->label(),
masm()->CallForDeoptimization(target, deoptimization_id, exit->label(),
deopt_kind, exit->continue_label(),
jump_deoptimization_entry_label);
@ -195,7 +195,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
}
void CodeGenerator::MaybeEmitOutOfLineConstantPool() {
tasm()->MaybeEmitOutOfLineConstantPool();
masm()->MaybeEmitOutOfLineConstantPool();
}
void CodeGenerator::AssembleCode() {
@ -204,27 +204,27 @@ void CodeGenerator::AssembleCode() {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done in AssemblePrologue).
FrameScope frame_scope(tasm(), StackFrame::MANUAL);
FrameScope frame_scope(masm(), StackFrame::MANUAL);
if (info->source_positions()) {
AssembleSourcePosition(start_source_position());
}
offsets_info_.code_start_register_check = tasm()->pc_offset();
offsets_info_.code_start_register_check = masm()->pc_offset();
tasm()->CodeEntry();
masm()->CodeEntry();
// Check that {kJavaScriptCallCodeStartRegister} has been set correctly.
if (v8_flags.debug_code && info->called_with_code_start_register()) {
tasm()->RecordComment("-- Prologue: check code start register --");
masm()->RecordComment("-- Prologue: check code start register --");
AssembleCodeStartRegisterCheck();
}
offsets_info_.deopt_check = tasm()->pc_offset();
offsets_info_.deopt_check = masm()->pc_offset();
// We want to bail out only from JS functions, which are the only ones
// that are optimized.
if (info->IsOptimizing()) {
DCHECK(linkage()->GetIncomingDescriptor()->IsJSFunctionCall());
tasm()->RecordComment("-- Prologue: check for deoptimization --");
masm()->RecordComment("-- Prologue: check for deoptimization --");
BailoutIfDeoptimized();
}
@ -258,22 +258,22 @@ void CodeGenerator::AssembleCode() {
instr_starts_.assign(instructions()->instructions().size(), {});
}
// Assemble instructions in assembly order.
offsets_info_.blocks_start = tasm()->pc_offset();
offsets_info_.blocks_start = masm()->pc_offset();
for (const InstructionBlock* block : instructions()->ao_blocks()) {
// Align loop headers on vendor recommended boundaries.
if (!tasm()->jump_optimization_info()) {
if (!masm()->jump_optimization_info()) {
if (block->ShouldAlignLoopHeader()) {
tasm()->LoopHeaderAlign();
masm()->LoopHeaderAlign();
} else if (block->ShouldAlignCodeTarget()) {
tasm()->CodeTargetAlign();
masm()->CodeTargetAlign();
}
}
if (info->trace_turbo_json()) {
block_starts_[block->rpo_number().ToInt()] = tasm()->pc_offset();
block_starts_[block->rpo_number().ToInt()] = masm()->pc_offset();
}
// Bind a label for a block.
current_block_ = block->rpo_number();
unwinding_info_writer_.BeginInstructionBlock(tasm()->pc_offset(), block);
unwinding_info_writer_.BeginInstructionBlock(masm()->pc_offset(), block);
if (v8_flags.code_comments) {
std::ostringstream buffer;
buffer << "-- B" << block->rpo_number().ToInt() << " start";
@ -289,12 +289,12 @@ void CodeGenerator::AssembleCode() {
buffer << " (in loop " << block->loop_header().ToInt() << ")";
}
buffer << " --";
tasm()->RecordComment(buffer.str().c_str());
masm()->RecordComment(buffer.str().c_str());
}
frame_access_state()->MarkHasFrame(block->needs_frame());
tasm()->bind(GetLabel(current_block_));
masm()->bind(GetLabel(current_block_));
if (block->must_construct_frame()) {
AssembleConstructFrame();
@ -303,7 +303,7 @@ void CodeGenerator::AssembleCode() {
// using the roots.
// TODO(mtrofin): investigate how we can avoid doing this repeatedly.
if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
tasm()->InitializeRootRegister();
masm()->InitializeRootRegister();
}
}
#ifdef V8_TARGET_ARCH_RISCV64
@ -312,10 +312,10 @@ void CodeGenerator::AssembleCode() {
// back between blocks. The RVV instructions may get an incorrect vtype, so
// the VectorUnit needs to be cleared here to ensure that the vtype is
// correct within the block.
tasm()->VU.clear();
masm()->VU.clear();
#endif
if (V8_EMBEDDED_CONSTANT_POOL_BOOL && !block->needs_frame()) {
ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
ConstantPoolUnavailableScope constant_pool_unavailable(masm());
result_ = AssembleBlock(block);
} else {
result_ = AssembleBlock(block);
@ -325,29 +325,29 @@ void CodeGenerator::AssembleCode() {
}
// Assemble all out-of-line code.
offsets_info_.out_of_line_code = tasm()->pc_offset();
offsets_info_.out_of_line_code = masm()->pc_offset();
if (ools_) {
tasm()->RecordComment("-- Out of line code --");
masm()->RecordComment("-- Out of line code --");
for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) {
tasm()->bind(ool->entry());
masm()->bind(ool->entry());
ool->Generate();
if (ool->exit()->is_bound()) tasm()->jmp(ool->exit());
if (ool->exit()->is_bound()) masm()->jmp(ool->exit());
}
}
// This nop operation is needed to ensure that the trampoline is not
// confused with the pc of the call before deoptimization.
// The test regress/regress-259 is an example of where we need it.
tasm()->nop();
masm()->nop();
// For some targets, we must make sure that constant and veneer pools are
// emitted before emitting the deoptimization exits.
PrepareForDeoptimizationExits(&deoptimization_exits_);
deopt_exit_start_offset_ = tasm()->pc_offset();
deopt_exit_start_offset_ = masm()->pc_offset();
// Assemble deoptimization exits.
offsets_info_.deoptimization_exits = tasm()->pc_offset();
offsets_info_.deoptimization_exits = masm()->pc_offset();
int last_updated = 0;
// We sort the deoptimization exits here so that the lazy ones will be visited
// last. We need this as lazy deopts might need additional instructions.
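
A standalone illustration of the ordering described in the comment above (types are hypothetical, not V8's): a stable sort that partitions eager exits before lazy ones preserves the original emission order within each group.

#include <algorithm>
#include <vector>

enum class DeoptKind { kEager, kLazy };
struct Exit {
  DeoptKind kind;
  int id;
};

// Moves all lazy exits to the end; stable_sort keeps the relative order
// within the eager and lazy groups unchanged.
void SortDeoptExits(std::vector<Exit>& exits) {
  std::stable_sort(exits.begin(), exits.end(),
                   [](const Exit& a, const Exit& b) {
                     return a.kind == DeoptKind::kEager &&
                            b.kind == DeoptKind::kLazy;
                   });
}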
@ -367,7 +367,7 @@ void CodeGenerator::AssembleCode() {
{
#ifdef V8_TARGET_ARCH_PPC64
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
tasm());
masm());
#endif
for (DeoptimizationExit* exit : deoptimization_exits_) {
if (exit->emitted()) continue;
@ -388,19 +388,19 @@ void CodeGenerator::AssembleCode() {
}
}
offsets_info_.pools = tasm()->pc_offset();
offsets_info_.pools = masm()->pc_offset();
// TODO(jgruber): Move all inlined metadata generation into a new,
// architecture-independent version of FinishCode. Currently, this includes
// the safepoint table, handler table, constant pool, and code comments, in
// that order.
FinishCode();
offsets_info_.jump_tables = tasm()->pc_offset();
offsets_info_.jump_tables = masm()->pc_offset();
// Emit the jump tables.
if (jump_tables_) {
tasm()->Align(kSystemPointerSize);
masm()->Align(kSystemPointerSize);
for (JumpTable* table = jump_tables_; table; table = table->next()) {
tasm()->bind(table->label());
masm()->bind(table->label());
AssembleJumpTable(table->targets(), table->target_count());
}
}
@ -408,34 +408,35 @@ void CodeGenerator::AssembleCode() {
// The LinuxPerfJitLogger logs code up until here, excluding the safepoint
// table. Resolve the unwinding info now so it is aware of the same code
// size as reported by perf.
unwinding_info_writer_.Finish(tasm()->pc_offset());
unwinding_info_writer_.Finish(masm()->pc_offset());
// Final alignment before starting on the metadata section.
tasm()->Align(InstructionStream::kMetadataAlignment);
masm()->Align(InstructionStream::kMetadataAlignment);
safepoints()->Emit(tasm(), frame()->GetTotalFrameSlotCount());
safepoints()->Emit(masm(), frame()->GetTotalFrameSlotCount());
// Emit the exception handler table.
if (!handlers_.empty()) {
handler_table_offset_ = HandlerTable::EmitReturnTableStart(tasm());
handler_table_offset_ = HandlerTable::EmitReturnTableStart(masm());
for (size_t i = 0; i < handlers_.size(); ++i) {
HandlerTable::EmitReturnEntry(tasm(), handlers_[i].pc_offset,
HandlerTable::EmitReturnEntry(masm(), handlers_[i].pc_offset,
handlers_[i].handler->pos());
}
}
tasm()->MaybeEmitOutOfLineConstantPool();
tasm()->FinalizeJumpOptimizationInfo();
masm()->MaybeEmitOutOfLineConstantPool();
masm()->FinalizeJumpOptimizationInfo();
result_ = kSuccess;
}
#ifndef V8_TARGET_ARCH_X64
void CodeGenerator::AssembleArchBinarySearchSwitchRange(
Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
std::pair<int32_t, Label*>* end) {
if (end - begin < kBinarySearchSwitchMinimalCases) {
while (begin != end) {
tasm()->JumpIfEqual(input, begin->first, begin->second);
masm()->JumpIfEqual(input, begin->first, begin->second);
++begin;
}
AssembleArchJumpRegardlessOfAssemblyOrder(def_block);
@ -443,11 +444,12 @@ void CodeGenerator::AssembleArchBinarySearchSwitchRange(
}
auto middle = begin + (end - begin) / 2;
Label less_label;
tasm()->JumpIfLessThan(input, middle->first, &less_label);
masm()->JumpIfLessThan(input, middle->first, &less_label);
AssembleArchBinarySearchSwitchRange(input, def_block, middle, end);
tasm()->bind(&less_label);
masm()->bind(&less_label);
AssembleArchBinarySearchSwitchRange(input, def_block, begin, middle);
}
#endif // V8_TARGET_ARCH_X64
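
The recursive lowering above emits a compare-and-branch tree over sorted (value, label) pairs, degrading to a linear scan below the minimal-case threshold. A host-side analogue of the dispatch it encodes (the threshold value is illustrative):

#include <cstdint>
#include <utility>

constexpr int kMinimalCases = 4;  // illustrative threshold

// Returns the jump target for `input` over a sorted (value, target) range,
// mirroring the emitted JumpIfEqual / JumpIfLessThan structure.
int Dispatch(int32_t input, const std::pair<int32_t, int>* begin,
             const std::pair<int32_t, int>* end, int default_target) {
  if (end - begin < kMinimalCases) {
    for (; begin != end; ++begin) {
      if (input == begin->first) return begin->second;  // JumpIfEqual
    }
    return default_target;  // jump to the default block
  }
  const std::pair<int32_t, int>* middle = begin + (end - begin) / 2;
  if (input < middle->first) {  // JumpIfLessThan
    return Dispatch(input, begin, middle, default_target);
  }
  return Dispatch(input, middle, end, default_target);
}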
void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target))
@ -469,7 +471,7 @@ base::OwnedVector<byte> CodeGenerator::GetProtectedInstructionsData() {
MaybeHandle<Code> CodeGenerator::FinalizeCode() {
if (result_ != kSuccess) {
tasm()->AbortedCodeGeneration();
masm()->AbortedCodeGeneration();
return {};
}
@ -482,11 +484,11 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
// Allocate and install the code.
CodeDesc desc;
tasm()->GetCode(isolate(), &desc, safepoints(), handler_table_offset_);
masm()->GetCode(isolate(), &desc, safepoints(), handler_table_offset_);
#if defined(V8_OS_WIN64)
if (Builtins::IsBuiltinId(info_->builtin())) {
isolate_->SetBuiltinUnwindData(info_->builtin(), tasm()->GetUnwindInfo());
isolate_->SetBuiltinUnwindData(info_->builtin(), masm()->GetUnwindInfo());
}
#endif // V8_OS_WIN64
@ -508,7 +510,7 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
Handle<Code> code;
if (!maybe_code.ToHandle(&code)) {
tasm()->AbortedCodeGeneration();
masm()->AbortedCodeGeneration();
return {};
}
@ -527,7 +529,7 @@ bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const {
}
void CodeGenerator::RecordSafepoint(ReferenceMap* references) {
auto safepoint = safepoints()->DefineSafepoint(tasm());
auto safepoint = safepoints()->DefineSafepoint(masm());
int frame_header_offset = frame()->GetFixedSlotCount();
for (const InstructionOperand& operand : references->reference_operands()) {
if (operand.IsStackSlot()) {
@ -558,7 +560,7 @@ bool CodeGenerator::IsMaterializableFromRoot(Handle<HeapObject> object,
CodeGenerator::CodeGenResult CodeGenerator::AssembleBlock(
const InstructionBlock* block) {
if (block->IsHandler()) {
tasm()->ExceptionHandler();
masm()->ExceptionHandler();
}
for (int i = block->code_start(); i < block->code_end(); ++i) {
CodeGenResult result = AssembleInstruction(i, block);
@ -718,7 +720,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
int instruction_index, const InstructionBlock* block) {
Instruction* instr = instructions()->InstructionAt(instruction_index);
if (info()->trace_turbo_json()) {
instr_starts_[instruction_index].gap_pc_offset = tasm()->pc_offset();
instr_starts_[instruction_index].gap_pc_offset = masm()->pc_offset();
}
int first_unused_stack_slot;
FlagsMode mode = FlagsModeField::decode(instr->opcode());
@ -738,14 +740,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
AssembleDeconstructFrame();
}
if (info()->trace_turbo_json()) {
instr_starts_[instruction_index].arch_instr_pc_offset = tasm()->pc_offset();
instr_starts_[instruction_index].arch_instr_pc_offset = masm()->pc_offset();
}
// Assemble architecture-specific code for the instruction.
CodeGenResult result = AssembleArchInstruction(instr);
if (result != kSuccess) return result;
if (info()->trace_turbo_json()) {
instr_starts_[instruction_index].condition_pc_offset = tasm()->pc_offset();
instr_starts_[instruction_index].condition_pc_offset = masm()->pc_offset();
}
FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
@ -779,7 +781,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
branch.false_label = exit->continue_label();
branch.fallthru = true;
AssembleArchDeoptBranch(instr, &branch);
tasm()->bind(exit->continue_label());
masm()->bind(exit->continue_label());
break;
}
case kFlags_set: {
@ -818,7 +820,7 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
if (source_position == current_source_position_) return;
current_source_position_ = source_position;
if (!source_position.IsKnown()) return;
source_position_table_builder_.AddPosition(tasm()->pc_offset(),
source_position_table_builder_.AddPosition(masm()->pc_offset(),
source_position, false);
if (v8_flags.code_comments) {
OptimizedCompilationInfo* info = this->info();
@ -833,8 +835,8 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
buffer << "-- ";
// Turbolizer only needs the source position, as it can reconstruct
// the inlining stack from other information.
if (info->trace_turbo_json() || !tasm()->isolate() ||
tasm()->isolate()->concurrent_recompilation_enabled()) {
if (info->trace_turbo_json() || !masm()->isolate() ||
masm()->isolate()->concurrent_recompilation_enabled()) {
buffer << source_position;
} else {
AllowGarbageCollection allocation;
@ -843,7 +845,7 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
buffer << source_position.InliningStack(info);
}
buffer << " --";
tasm()->RecordComment(buffer.str().c_str());
masm()->RecordComment(buffer.str().c_str());
}
}
@ -981,7 +983,7 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
DCHECK(instructions()->InstructionBlockAt(handler_rpo)->IsHandler());
handlers_.push_back(
{GetLabel(handler_rpo), tasm()->pc_offset_for_safepoint()});
{GetLabel(handler_rpo), masm()->pc_offset_for_safepoint()});
}
if (needs_frame_state) {
@ -991,7 +993,7 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
size_t frame_state_offset = 1;
FrameStateDescriptor* descriptor =
GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
int pc_offset = tasm()->pc_offset_for_safepoint();
int pc_offset = masm()->pc_offset_for_safepoint();
BuildTranslation(instr, pc_offset, frame_state_offset, 0,
descriptor->state_combine());
}
@ -1325,7 +1327,7 @@ void CodeGenerator::AddTranslationForOperand(Instruction* instr,
}
void CodeGenerator::MarkLazyDeoptSite() {
last_lazy_deopt_pc_ = tasm()->pc_offset();
last_lazy_deopt_pc_ = masm()->pc_offset();
}
DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
@ -1336,7 +1338,7 @@ DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
}
OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
: frame_(gen->frame()), tasm_(gen->tasm()), next_(gen->ools_) {
: frame_(gen->frame()), masm_(gen->masm()), next_(gen->ools_) {
gen->ools_ = this;
}
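
The constructor above threads each new out-of-line stub onto the head of the generator's intrusive singly linked list; AssembleCode later walks the list when emitting the out-of-line section. A stripped-down sketch of the pattern (names are illustrative):

struct OolNode {
  OolNode* const next;
  // Push-front: the newest node becomes the list head.
  explicit OolNode(OolNode*& head) : next(head) { head = this; }
};

void EmitAll(OolNode* head) {
  for (OolNode* ool = head; ool != nullptr; ool = ool->next) {
    // bind entry label, generate stub code, jump back to exit if bound
  }
}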


@ -188,7 +188,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
void RecordSafepoint(ReferenceMap* references);
Zone* zone() const { return zone_; }
TurboAssembler* tasm() { return &tasm_; }
MacroAssembler* masm() { return &masm_; }
SafepointTableBuilder* safepoint_table_builder() { return &safepoints_; }
size_t handler_table_offset() const { return handler_table_offset_; }
@ -278,9 +278,15 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
#if V8_ENABLE_WEBASSEMBLY
void AssembleArchTrap(Instruction* instr, FlagsCondition condition);
#endif // V8_ENABLE_WEBASSEMBLY
#if V8_TARGET_ARCH_X64
void AssembleArchBinarySearchSwitchRange(
Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
std::pair<int32_t, Label*>* end, base::Optional<int32_t>& last_cmp_value);
#else
void AssembleArchBinarySearchSwitchRange(Register input, RpoNumber def_block,
std::pair<int32_t, Label*>* begin,
std::pair<int32_t, Label*>* end);
#endif // V8_TARGET_ARCH_X64
void AssembleArchBinarySearchSwitch(Instruction* instr);
void AssembleArchTableSwitch(Instruction* instr);
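
The x64-only overload declared above appears to thread the most recent comparison value through the recursion so a repeated boundary compare can be elided; the sketch below shows that caching idea under that assumption, with std::optional standing in for base::Optional:

#include <cstdint>
#include <optional>

// Emits a compare only if the flags do not already reflect `value`.
void CompareIfNeeded(int32_t value, std::optional<int32_t>& last_cmp_value) {
  if (!last_cmp_value.has_value() || *last_cmp_value != value) {
    // emit: cmp input, Immediate(value)
    last_cmp_value = value;
  }
  // emit: conditional branch consuming the flags of the cached compare
}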
@ -448,7 +454,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
RpoNumber current_block_;
SourcePosition start_source_position_;
SourcePosition current_source_position_;
TurboAssembler tasm_;
MacroAssembler masm_;
GapResolver resolver_;
SafepointTableBuilder safepoints_;
ZoneVector<HandlerInfo> handlers_;


@ -29,7 +29,7 @@ namespace v8 {
namespace internal {
namespace compiler {
#define __ tasm()->
#define __ masm()->
#define kScratchDoubleReg xmm0
@ -202,11 +202,11 @@ class IA32OperandConverter : public InstructionOperandConverter {
void MoveInstructionOperandToRegister(Register destination,
InstructionOperand* op) {
if (op->IsImmediate() || op->IsConstant()) {
gen_->tasm()->mov(destination, ToImmediate(op));
gen_->masm()->mov(destination, ToImmediate(op));
} else if (op->IsRegister()) {
gen_->tasm()->Move(destination, ToRegister(op));
gen_->masm()->Move(destination, ToRegister(op));
} else {
gen_->tasm()->mov(destination, ToOperand(op));
gen_->masm()->mov(destination, ToOperand(op));
}
}
};
@ -475,7 +475,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
XMMRegister src0 = i.InputSimd128Register(0); \
Operand src1 = i.InputOperand(instr->InputCount() == 2 ? 1 : 0); \
if (CpuFeatures::IsSupported(AVX)) { \
CpuFeatureScope avx_scope(tasm(), AVX); \
CpuFeatureScope avx_scope(masm(), AVX); \
__ v##opcode(i.OutputSimd128Register(), src0, src1); \
} else { \
DCHECK_EQ(i.OutputSimd128Register(), src0); \
@ -485,11 +485,11 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
#define ASSEMBLE_SIMD_IMM_SHUFFLE(opcode, SSELevel, imm) \
if (CpuFeatures::IsSupported(AVX)) { \
CpuFeatureScope avx_scope(tasm(), AVX); \
CpuFeatureScope avx_scope(masm(), AVX); \
__ v##opcode(i.OutputSimd128Register(), i.InputSimd128Register(0), \
i.InputOperand(1), imm); \
} else { \
CpuFeatureScope sse_scope(tasm(), SSELevel); \
CpuFeatureScope sse_scope(masm(), SSELevel); \
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); \
__ opcode(i.OutputSimd128Register(), i.InputOperand(1), imm); \
}
@ -532,26 +532,25 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
int8_t laneidx = i.InputInt8(1); \
if (HasAddressingMode(instr)) { \
if (CpuFeatures::IsSupported(AVX)) { \
CpuFeatureScope avx_scope(tasm(), AVX); \
CpuFeatureScope avx_scope(masm(), AVX); \
__ v##OPCODE(dst, src, i.MemoryOperand(2), laneidx); \
} else { \
DCHECK_EQ(dst, src); \
CpuFeatureScope sse_scope(tasm(), CPU_FEATURE); \
CpuFeatureScope sse_scope(masm(), CPU_FEATURE); \
__ OPCODE(dst, i.MemoryOperand(2), laneidx); \
} \
} else { \
if (CpuFeatures::IsSupported(AVX)) { \
CpuFeatureScope avx_scope(tasm(), AVX); \
CpuFeatureScope avx_scope(masm(), AVX); \
__ v##OPCODE(dst, src, i.InputOperand(2), laneidx); \
} else { \
DCHECK_EQ(dst, src); \
CpuFeatureScope sse_scope(tasm(), CPU_FEATURE); \
CpuFeatureScope sse_scope(masm(), CPU_FEATURE); \
__ OPCODE(dst, i.InputOperand(2), laneidx); \
} \
} \
} while (false)
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(esp, ebp);
__ pop(ebp);
@ -566,7 +565,7 @@ void CodeGenerator::AssemblePrepareTailCall() {
namespace {
void AdjustStackPointerForTailCall(TurboAssembler* tasm,
void AdjustStackPointerForTailCall(MacroAssembler* masm,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
@ -574,10 +573,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
tasm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize);
masm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
tasm->add(esp, Immediate(-stack_slot_delta * kSystemPointerSize));
masm->add(esp, Immediate(-stack_slot_delta * kSystemPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
}
}
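
The adjustment above is pure slot arithmetic: the delta between the required and current slots above sp, scaled by the pointer size, with positive deltas allocating stack and negative ones releasing it. A tiny worked example (input values are illustrative):

#include <cstdio>

constexpr int kSystemPointerSize = 4;  // ia32

int StackAdjustmentBytes(int current_sp_offset, int new_slot_above_sp) {
  const int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  return stack_slot_delta * kSystemPointerSize;  // >0 allocate, <0 release
}

int main() {
  // The tail call needs 3 slots above sp but only 1 is reserved:
  // allocate 2 more slots, i.e. 8 bytes on ia32.
  std::printf("%d\n", StackAdjustmentBytes(1, 3));  // prints 8
}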
@ -617,7 +616,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand destination_location(
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
AdjustStackPointerForTailCall(masm(), frame_access_state(),
destination_location.index());
if (source.IsStackSlot()) {
LocationOperand source_location(LocationOperand::cast(source));
@ -635,13 +634,13 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
move->Eliminate();
}
}
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_slot_offset) {
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset);
}
@ -884,7 +883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
__ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
@ -1262,7 +1261,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kIA32Float32Round: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
CpuFeatureScope sse_scope(masm(), SSE4_1);
RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
__ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
@ -2112,12 +2111,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kIA32Insertps: {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(tasm(), AVX);
CpuFeatureScope avx_scope(masm(), AVX);
__ vinsertps(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(2), i.InputInt8(1) << 4);
} else {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
CpuFeatureScope sse_scope(masm(), SSE4_1);
__ insertps(i.OutputSimd128Register(), i.InputOperand(2),
i.InputInt8(1) << 4);
}
@ -2315,12 +2314,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister src1 = i.InputSimd128Register(0);
XMMRegister src2 = i.InputSimd128Register(1);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(tasm(), AVX);
CpuFeatureScope avx_scope(masm(), AVX);
__ vpminsd(kScratchDoubleReg, src1, src2);
__ vpcmpeqd(dst, kScratchDoubleReg, src2);
} else {
DCHECK_EQ(dst, src1);
CpuFeatureScope sse_scope(tasm(), SSE4_1);
CpuFeatureScope sse_scope(masm(), SSE4_1);
__ pminsd(dst, src2);
__ pcmpeqd(dst, src2);
}
@ -2328,7 +2327,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSEI32x4UConvertF32x4: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister tmp = i.TempSimd128Register(0);
// NaN->0, negative->0
@ -2356,7 +2355,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kAVXI32x4UConvertF32x4: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope avx_scope(tasm(), AVX);
CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister tmp = i.TempSimd128Register(0);
// NaN->0, negative->0
@ -2406,7 +2405,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSEI32x4GtU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1);
__ pmaxud(dst, src);
@ -2416,7 +2415,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXI32x4GtU: {
CpuFeatureScope avx_scope(tasm(), AVX);
CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
@ -2428,7 +2427,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSEI32x4GeU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1);
__ pminud(dst, src);
@ -2436,7 +2435,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXI32x4GeU: {
CpuFeatureScope avx_scope(tasm(), AVX);
CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
__ vpminud(kScratchDoubleReg, src1, src2);
@ -2552,7 +2551,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXI16x8Ne: {
CpuFeatureScope avx_scope(tasm(), AVX);
CpuFeatureScope avx_scope(masm(), AVX);
__ vpcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
__ vpcmpeqw(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
@ -2574,7 +2573,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXI16x8GeS: {
CpuFeatureScope avx_scope(tasm(), AVX);
CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
__ vpminsw(kScratchDoubleReg, src1, src2);
@ -2621,7 +2620,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSEI16x8GtU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1);
__ pmaxuw(dst, src);
@ -2631,7 +2630,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXI16x8GtU: {
CpuFeatureScope avx_scope(tasm(), AVX);
CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
@ -2643,7 +2642,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSEI16x8GeU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1);
__ pminuw(dst, src);
@ -2651,7 +2650,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXI16x8GeU: {
CpuFeatureScope avx_scope(tasm(), AVX);
CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
__ vpminuw(kScratchDoubleReg, src1, src2);
@ -2844,7 +2843,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXI8x16Ne: {
CpuFeatureScope avx_scope(tasm(), AVX);
CpuFeatureScope avx_scope(masm(), AVX);
__ vpcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
__ vpcmpeqb(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
@ -2859,7 +2858,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSEI8x16GeS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1);
__ pminsb(dst, src);
@ -2867,7 +2866,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXI8x16GeS: {
CpuFeatureScope avx_scope(tasm(), AVX);
CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
__ vpminsb(kScratchDoubleReg, src1, src2);
@ -2925,7 +2924,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXI8x16GtU: {
CpuFeatureScope avx_scope(tasm(), AVX);
CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
@ -2944,7 +2943,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXI8x16GeU: {
CpuFeatureScope avx_scope(tasm(), AVX);
CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
__ vpminub(kScratchDoubleReg, src1, src2);
@ -3183,7 +3182,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister src = i.InputSimd128Register(0);
uint8_t lane = i.InputUint8(1) & 0xf;
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(tasm(), AVX);
CpuFeatureScope avx_scope(masm(), AVX);
if (lane < 8) {
__ vpunpcklbw(dst, src, src);
} else {
@ -3234,7 +3233,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpcklbw);
break;
case kSSES16x8UnzipHigh: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src2 = dst;
DCHECK_EQ(dst, i.InputSimd128Register(0));
@ -3248,7 +3247,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXS16x8UnzipHigh: {
CpuFeatureScope avx_scope(tasm(), AVX);
CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src2 = dst;
if (instr->InputCount() == 2) {
@ -3260,7 +3259,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSES16x8UnzipLow: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src2 = dst;
DCHECK_EQ(dst, i.InputSimd128Register(0));
@ -3274,7 +3273,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXS16x8UnzipLow: {
CpuFeatureScope avx_scope(tasm(), AVX);
CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src2 = dst;
__ vpxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
@ -3301,7 +3300,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXS8x16UnzipHigh: {
CpuFeatureScope avx_scope(tasm(), AVX);
CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src2 = dst;
if (instr->InputCount() == 2) {
@ -3328,7 +3327,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXS8x16UnzipLow: {
CpuFeatureScope avx_scope(tasm(), AVX);
CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src2 = dst;
if (instr->InputCount() == 2) {
@ -3357,7 +3356,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXS8x16TransposeLow: {
CpuFeatureScope avx_scope(tasm(), AVX);
CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
if (instr->InputCount() == 1) {
__ vpsllw(kScratchDoubleReg, i.InputSimd128Register(0), 8);
@ -3387,7 +3386,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXS8x16TransposeHigh: {
CpuFeatureScope avx_scope(tasm(), AVX);
CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
if (instr->InputCount() == 1) {
__ vpsrlw(dst, i.InputSimd128Register(0), 8);
@ -3423,7 +3422,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAVXS8x4Reverse:
case kAVXS8x8Reverse: {
DCHECK_EQ(1, instr->InputCount());
CpuFeatureScope avx_scope(tasm(), AVX);
CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = dst;
if (arch_opcode != kAVXS8x2Reverse) {
@ -4205,8 +4204,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
__ j(greater, &mismatch_return, Label::kNear);
__ Ret(parameter_slots * kSystemPointerSize, scratch_reg);
__ bind(&mismatch_return);
__ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
__ DropArguments(argc_reg, scratch_reg, MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
// We use a return instead of a jump for better return address prediction.
__ Ret();
} else if (additional_pop_count->IsImmediate()) {


@ -18,7 +18,7 @@
#include "src/codegen/ia32/assembler-ia32.h"
#include "src/codegen/ia32/register-ia32.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/turbo-assembler.h"
#include "src/codegen/macro-assembler-base.h"
#include "src/common/globals.h"
#include "src/compiler/backend/instruction-codes.h"
#include "src/compiler/backend/instruction-selector-impl.h"
@ -208,7 +208,7 @@ class IA32OperandGenerator final : public OperandGenerator {
m.object().ResolvedValue())) {
ptrdiff_t const delta =
m.index().ResolvedValue() +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
MacroAssemblerBase::RootRegisterOffsetForExternalReference(
selector()->isolate(), m.object().ResolvedValue());
if (is_int32(delta)) {
inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta));


@ -451,7 +451,7 @@ bool InstructionSelector::CanAddressRelativeToRootsRegister(
// 3. IsAddressableThroughRootRegister: Is the target address guaranteed to
// have a fixed root-relative offset? If so, we can ignore 2.
const bool this_root_relative_offset_is_constant =
TurboAssemblerBase::IsAddressableThroughRootRegister(isolate(),
MacroAssemblerBase::IsAddressableThroughRootRegister(isolate(),
reference);
return this_root_relative_offset_is_constant;
}


@ -23,7 +23,7 @@ namespace v8 {
namespace internal {
namespace compiler {
#define __ tasm()->
#define __ masm()->
// TODO(LOONG_dev): consider renaming these macros.
#define TRACE_MSG(msg) \
@ -450,8 +450,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
FrameScope scope(tasm(), StackFrame::MANUAL); \
UseScratchRegisterScope temps(tasm()); \
FrameScope scope(masm(), StackFrame::MANUAL); \
UseScratchRegisterScope temps(masm()); \
Register scratch = temps.Acquire(); \
__ PrepareCallCFunction(0, 2, scratch); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
@ -459,8 +459,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
#define ASSEMBLE_IEEE754_UNOP(name) \
do { \
FrameScope scope(tasm(), StackFrame::MANUAL); \
UseScratchRegisterScope temps(tasm()); \
FrameScope scope(masm(), StackFrame::MANUAL); \
UseScratchRegisterScope temps(masm()); \
Register scratch = temps.Acquire(); \
__ PrepareCallCFunction(0, 1, scratch); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
@ -487,7 +487,7 @@ void CodeGenerator::AssemblePrepareTailCall() {
namespace {
void AdjustStackPointerForTailCall(TurboAssembler* tasm,
void AdjustStackPointerForTailCall(MacroAssembler* masm,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
@ -495,10 +495,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
tasm->Sub_d(sp, sp, stack_slot_delta * kSystemPointerSize);
masm->Sub_d(sp, sp, stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
tasm->Add_d(sp, sp, -stack_slot_delta * kSystemPointerSize);
masm->Add_d(sp, sp, -stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
}
}
@ -507,19 +507,19 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
int first_unused_slot_offset) {
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_slot_offset) {
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset);
}
// Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ ComputeCodeStartAddress(scratch);
__ Assert(eq, AbortReason::kWrongFunctionCodeStart,
@ -534,7 +534,7 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
__ Ld_d(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
@ -628,7 +628,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallJSFunction: {
Register func = i.InputRegister(0);
if (v8_flags.debug_code) {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
// Check the function's context matches the context argument.
__ Ld_d(scratch, FieldMemOperand(func, JSFunction::kContextOffset));
@ -642,7 +642,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchPrepareCallCFunction: {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
int const num_gp_parameters = ParamField::decode(instr->opcode());
int const num_fp_parameters = FPParamField::decode(instr->opcode());
@ -749,7 +749,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
__ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
@ -829,7 +829,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
DCHECK_EQ(addressing_mode, kMode_MRI);
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ Add_d(scratch, object, Operand(i.InputInt64(1)));
__ amswap_db_d(zero_reg, value, scratch);
@ -843,7 +843,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchStackSlot: {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
@ -1225,8 +1225,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kLoong64Float64Mod: {
// TODO(turbofan): implement directly.
FrameScope scope(tasm(), StackFrame::MANUAL);
UseScratchRegisterScope temps(tasm());
FrameScope scope(masm(), StackFrame::MANUAL);
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ PrepareCallCFunction(0, 2, scratch);
__ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
@ -1363,7 +1363,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ftintrz_w_s(scratch_d, i.InputDoubleRegister(0));
__ movfr2gr_s(i.OutputRegister(), scratch_d);
if (set_overflow_to_min_i32) {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
// Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
// because INT32_MIN allows easier out-of-bounds detection.
@ -1392,7 +1392,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kLoong64Float64ToInt64: {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
FPURegister scratch_d = kScratchDoubleReg;
@ -1438,7 +1438,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
__ Ftintrz_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
if (set_overflow_to_min_i32) {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
// Avoid UINT32_MAX as an overflow indicator and use 0 instead,
// because 0 allows easier out-of-bounds detection.
@ -1863,11 +1863,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
<< "\""; \
UNIMPLEMENTED();
void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
Instruction* instr, FlagsCondition condition,
Label* tlabel, Label* flabel, bool fallthru) {
#undef __
#define __ tasm->
#define __ masm->
Loong64OperandConverter i(gen, instr);
// LOONG64 does not have condition code flags, so compare and branch are
@ -1882,7 +1882,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
__ Branch(tlabel, cc, t8, Operand(zero_reg));
} else if (instr->arch_opcode() == kLoong64Add_d ||
instr->arch_opcode() == kLoong64Sub_d) {
UseScratchRegisterScope temps(tasm);
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
Register scratch2 = temps.Acquire();
Condition cc = FlagsConditionToConditionOvf(condition);
@ -1941,7 +1941,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
}
if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
#undef __
#define __ tasm()->
#define __ masm()->
}
// Assembles branches after an instruction.
@ -1949,7 +1949,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
Label* tlabel = branch->true_label;
Label* flabel = branch->false_label;
AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
branch->fallthru);
}
@ -2014,7 +2014,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
};
auto ool = zone()->New<OutOfLineTrap>(this, instr);
Label* tlabel = ool->entry();
AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
}
#endif // V8_ENABLE_WEBASSEMBLY
@ -2041,7 +2041,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
return;
} else if (instr->arch_opcode() == kLoong64Add_d ||
instr->arch_opcode() == kLoong64Sub_d) {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
Condition cc = FlagsConditionToConditionOvf(condition);
// Check for overflow creates 1 or 0 for result.
@ -2289,7 +2289,7 @@ void CodeGenerator::AssembleConstructFrame() {
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ Ld_d(scratch, FieldMemOperand(
kWasmInstanceRegister,
@ -2444,7 +2444,7 @@ AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
__ Push(g.ToRegister(source));
frame_access_state()->IncreaseSPDelta(new_slots);
} else if (source->IsStackSlot()) {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ Ld_d(scratch, g.ToMemOperand(source));
__ Push(scratch);
@ -2467,7 +2467,7 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
if (dest->IsRegister()) {
__ Pop(g.ToRegister(dest));
} else if (dest->IsStackSlot()) {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ Pop(scratch);
__ St_d(scratch, g.ToMemOperand(dest));
@ -2495,7 +2495,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
MachineRepresentation rep) {
// Must be kept in sync with {MoveTempLocationTo}.
DCHECK(!source->IsImmediate());
move_cycle_.temps.emplace(tasm());
move_cycle_.temps.emplace(masm());
auto& temps = *move_cycle_.temps;
// Temporarily exclude the reserved scratch registers while we pick one to
// resolve the move cycle. Re-include them immediately afterwards as they
@ -2585,7 +2585,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
void CodeGenerator::SetPendingMove(MoveOperands* move) {
InstructionOperand* src = &move->source();
InstructionOperand* dst = &move->destination();
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
if (src->IsConstant() || (src->IsStackSlot() && dst->IsStackSlot())) {
Register temp = temps.Acquire();
move_cycle_.scratch_regs.set(temp);
@ -2642,7 +2642,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (destination->IsRegister()) {
__ Ld_d(g.ToRegister(destination), src);
} else {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ Ld_d(scratch, src);
__ St_d(scratch, g.ToMemOperand(destination));
@ -2650,7 +2650,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else if (source->IsConstant()) {
Constant src = g.ToConstant(source);
if (destination->IsRegister() || destination->IsStackSlot()) {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
Register dst =
destination->IsRegister() ? g.ToRegister(destination) : scratch;
@ -2697,7 +2697,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (base::bit_cast<int32_t>(src.ToFloat32()) == 0) {
__ St_d(zero_reg, dst);
} else {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ li(scratch, Operand(base::bit_cast<int32_t>(src.ToFloat32())));
__ St_d(scratch, dst);
@ -2748,7 +2748,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
// Register-register.
Register src = g.ToRegister(source);
@ -2770,7 +2770,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
// Since the Ld instruction may need a scratch reg,
// we should not use both scratch registers from
// UseScratchRegisterScope here.
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
FPURegister scratch_d = kScratchDoubleReg;
MemOperand src = g.ToMemOperand(source);
@ -2796,7 +2796,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPStackSlot());
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
FPURegister scratch_d = kScratchDoubleReg;
MemOperand src = g.ToMemOperand(source);


@ -360,7 +360,7 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
ptrdiff_t const delta =
g.GetIntegerConstantValue(index) +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
MacroAssemblerBase::RootRegisterOffsetForExternalReference(
selector->isolate(), m.ResolvedValue());
// Check that the delta is a 32-bit integer due to the limitations of
// immediate operands.
@ -560,7 +560,7 @@ void InstructionSelector::VisitStore(Node* node) {
CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
ptrdiff_t const delta =
g.GetIntegerConstantValue(index) +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
MacroAssemblerBase::RootRegisterOffsetForExternalReference(
isolate(), m.ResolvedValue());
// Check that the delta is a 32-bit integer due to the limitations of
// immediate operands.
@ -1398,21 +1398,33 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
}
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
// On LoongArch64, int32 values should all be sign-extended to 64-bit, so
// there is no need to sign-extend them here.
// However, when calling a host function from the simulator, the simulator
// does not sign-extend an int32 return value to int64, because it cannot
// know whether the callee returns an int32 or an int64.
#ifdef USE_SIMULATOR
Node* value = node->InputAt(0);
if (value->opcode() == IrOpcode::kCall) {
if ((value->opcode() == IrOpcode::kLoad ||
value->opcode() == IrOpcode::kLoadImmutable) &&
CanCover(node, value)) {
// Generate sign-extending load.
LoadRepresentation load_rep = LoadRepresentationOf(value->op());
InstructionCode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
opcode = load_rep.IsUnsigned() ? kLoong64Ld_bu : kLoong64Ld_b;
break;
case MachineRepresentation::kWord16:
opcode = load_rep.IsUnsigned() ? kLoong64Ld_hu : kLoong64Ld_h;
break;
case MachineRepresentation::kWord32:
opcode = kLoong64Ld_w;
break;
default:
UNREACHABLE();
}
EmitLoad(this, value, opcode, node);
} else {
Loong64OperandGenerator g(this);
Emit(kLoong64Sll_w, g.DefineAsRegister(node), g.UseRegister(value),
g.TempImmediate(0));
return;
}
#endif
EmitIdentity(node);
}
bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
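
The replacement above (mirrored for MIPS64 in a later hunk) stops assuming that every int32 value reaching ChangeInt32ToInt64 is already sign-extended: an int32 load that the conversion can cover is folded into a sign-extending load, and everything else gets an explicit shift-by-zero, which sign-extends 32-bit results on these targets. A schematic of the selection rule, with illustrative names:

enum class Producer { kLoadWord32, kOther };

// Picks the lowering for ChangeInt32ToInt64 in simulator builds.
const char* SelectChangeInt32ToInt64(Producer p, bool can_cover_load) {
  if (p == Producer::kLoadWord32 && can_cover_load) {
    return "Ld_w";     // 32-bit load already sign-extends its result
  }
  return "Sll_w #0";   // shift by zero sign-extends the low 32 bits
}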

File diff suppressed because it is too large.


@ -775,7 +775,7 @@ int PrepareForTailCallLatency() {
int AssertLatency() { return 1; }
int PrepareCallCFunctionLatency() {
int frame_alignment = TurboAssembler::ActivationFrameAlignment();
int frame_alignment = MacroAssembler::ActivationFrameAlignment();
if (frame_alignment > kSystemPointerSize) {
return 1 + DsubuLatency(false) + AndLatency(false) + 1;
} else {


@ -1481,21 +1481,33 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
}
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
// On MIPS64, int32 values should all be sign-extended to 64-bit, so
// there is no need to sign-extend them here.
// However, when calling a host function from the simulator, the simulator
// does not sign-extend an int32 return value to int64, because it cannot
// know whether the callee returns an int32 or an int64.
#ifdef USE_SIMULATOR
Node* value = node->InputAt(0);
if (value->opcode() == IrOpcode::kCall) {
if ((value->opcode() == IrOpcode::kLoad ||
value->opcode() == IrOpcode::kLoadImmutable) &&
CanCover(node, value)) {
// Generate sign-extending load.
LoadRepresentation load_rep = LoadRepresentationOf(value->op());
InstructionCode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
break;
case MachineRepresentation::kWord16:
opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
break;
case MachineRepresentation::kWord32:
opcode = kMips64Lw;
break;
default:
UNREACHABLE();
}
EmitLoad(this, value, opcode, node);
} else {
Mips64OperandGenerator g(this);
Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(value),
g.TempImmediate(0));
return;
}
#endif
EmitIdentity(node);
}
bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {


@ -23,7 +23,7 @@ namespace v8 {
namespace internal {
namespace compiler {
#define __ tasm()->
#define __ masm()->
#define kScratchReg r11
@ -170,9 +170,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
}
void Generate() final {
ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
ConstantPoolUnavailableScope constant_pool_unavailable(masm());
if (COMPRESS_POINTERS_BOOL) {
__ DecompressTaggedPointer(value_, value_);
__ DecompressTagged(value_, value_);
}
__ CheckPageFlag(
value_, scratch0_,
@ -409,7 +409,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
#define ASSEMBLE_FLOAT_MODULO() \
do { \
FrameScope scope(tasm(), StackFrame::MANUAL); \
FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
@ -422,7 +422,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
FrameScope scope(tasm(), StackFrame::MANUAL); \
FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 1, kScratchReg); \
__ MovToFloatParameter(i.InputDoubleRegister(0)); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
@ -435,7 +435,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
FrameScope scope(tasm(), StackFrame::MANUAL); \
FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
@ -680,20 +680,20 @@ void CodeGenerator::AssemblePrepareTailCall() {
namespace {
void FlushPendingPushRegisters(TurboAssembler* tasm,
void FlushPendingPushRegisters(MacroAssembler* masm,
FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes) {
switch (pending_pushes->size()) {
case 0:
break;
case 1:
tasm->Push((*pending_pushes)[0]);
masm->Push((*pending_pushes)[0]);
break;
case 2:
tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
break;
case 3:
tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
(*pending_pushes)[2]);
break;
default:
@ -704,7 +704,7 @@ void FlushPendingPushRegisters(TurboAssembler* tasm,
}
void AdjustStackPointerForTailCall(
TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
ZoneVector<Register>* pending_pushes = nullptr,
bool allow_shrinkage = true) {
int current_sp_offset = state->GetSPToFPSlotCount() +
@ -712,15 +712,15 @@ void AdjustStackPointerForTailCall(
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
if (pending_pushes != nullptr) {
FlushPendingPushRegisters(tasm, state, pending_pushes);
FlushPendingPushRegisters(masm, state, pending_pushes);
}
tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0);
masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
if (pending_pushes != nullptr) {
FlushPendingPushRegisters(tasm, state, pending_pushes);
FlushPendingPushRegisters(masm, state, pending_pushes);
}
tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0);
masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0);
state->IncreaseSPDelta(stack_slot_delta);
}
}
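
AdjustStackPointerForTailCall is pure slot arithmetic: compare the requested number of slots above sp with the current one, flush any pending pushes, and bump sp by the negated difference in bytes. A standalone sketch of that arithmetic, with a simplified stand-in for the state FrameAccessState tracks (hypothetical names, not the V8 types):

#include <cstdint>

constexpr int kSystemPointerSize = 8;  // 64-bit target

// Simplified stand-in for the slot bookkeeping in FrameAccessState.
struct SlotState {
  int sp_to_fp_slots;  // what GetSPToFPSlotCount() would contribute
};

// Byte offset to add to sp so that new_slot_above_sp slots sit above it.
// A negative result grows the stack (sp moves down), matching the
// AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize)) above.
int64_t SpAdjustmentBytes(const SlotState& state, int new_slot_above_sp) {
  int stack_slot_delta = new_slot_above_sp - state.sp_to_fp_slots;
  return -static_cast<int64_t>(stack_slot_delta) * kSystemPointerSize;
}
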
@ -742,7 +742,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
AdjustStackPointerForTailCall(
tasm(), frame_access_state(),
masm(), frame_access_state(),
destination_location.index() - pending_pushes.size(),
&pending_pushes);
// Pushes of non-register data types are not supported.
@ -752,20 +752,20 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
// TODO(arm): We can push more than 3 registers at once. Add support in
// the macro-assembler for pushing a list of registers.
if (pending_pushes.size() == 3) {
FlushPendingPushRegisters(tasm(), frame_access_state(),
FlushPendingPushRegisters(masm(), frame_access_state(),
&pending_pushes);
}
move->Eliminate();
}
FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
}
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset, nullptr, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_slot_offset) {
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset);
}
@ -793,8 +793,8 @@ void CodeGenerator::BailoutIfDeoptimized() {
}
int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
__ LoadTaggedPointerField(
r11, MemOperand(kJavaScriptCallCodeStartRegister, offset), r0);
__ LoadTaggedField(r11, MemOperand(kJavaScriptCallCodeStartRegister, offset),
r0);
__ LoadS32(r11, FieldMemOperand(r11, Code::kKindSpecificFlagsOffset), r0);
__ TestBit(r11, InstructionStream::kMarkedForDeoptimizationBit);
__ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
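
The bailout above loads the code object's kind-specific flags and tests a single bit; if it is set, execution tail-jumps to CompileLazyDeoptimizedCode. Reduced to C++, the test is just the following (bit index assumed for illustration; the real value is InstructionStream::kMarkedForDeoptimizationBit):

#include <cstdint>

constexpr int kMarkedForDeoptimizationBit = 0;  // assumed for illustration

bool MarkedForDeoptimization(int32_t kind_specific_flags) {
  return (kind_specific_flags >> kMarkedForDeoptimizationBit) & 1;
}
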
@ -810,7 +810,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
switch (opcode) {
case kArchCallCodeObject: {
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
tasm());
masm());
if (HasRegisterInput(instr, 0)) {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
@ -883,7 +883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
// We cannot use the constant pool to load the target since
// we've already restored the caller's frame.
ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
ConstantPoolUnavailableScope constant_pool_unavailable(masm());
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
}
DCHECK_EQ(LeaveRC, i.OutputRCBit());
@ -904,18 +904,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallJSFunction: {
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
tasm());
masm());
Register func = i.InputRegister(0);
if (v8_flags.debug_code) {
// Check the function's context matches the context argument.
__ LoadTaggedPointerField(
__ LoadTaggedField(
kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset), r0);
__ CmpS64(cp, kScratchReg);
__ Assert(eq, AbortReason::kWrongFunctionContext);
}
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ LoadTaggedPointerField(
r5, FieldMemOperand(func, JSFunction::kCodeOffset), r0);
__ LoadTaggedField(r5, FieldMemOperand(func, JSFunction::kCodeOffset),
r0);
__ CallCodeObject(r5);
RecordCallPosition(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
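
In debug builds, the kArchCallJSFunction sequence above first verifies that the callee JSFunction carries the same context as the incoming context register (cp) before loading and calling its code object. A host-side sketch with stand-in types (not V8's actual object layout):

// Stand-in for the two tagged fields the generated code reads.
struct FakeJSFunction {
  const void* context;  // what JSFunction::kContextOffset holds
  const void* code;     // what JSFunction::kCodeOffset holds
};

// Mirrors the CmpS64(cp, kScratchReg) + Assert(eq, ...) pair above.
bool ContextMatches(const FakeJSFunction& func, const void* cp) {
  return func.context == cp;
}
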
@ -1058,7 +1058,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
__ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
@ -2880,13 +2880,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false);
break;
}
case kPPC_LoadDecompressTaggedPointer: {
CHECK(instr->HasOutput());
ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false);
__ add(i.OutputRegister(), i.OutputRegister(), kPtrComprCageBaseRegister);
break;
}
case kPPC_LoadDecompressAnyTagged: {
case kPPC_LoadDecompressTagged: {
CHECK(instr->HasOutput());
ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false);
__ add(i.OutputRegister(), i.OutputRegister(), kPtrComprCageBaseRegister);
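
Both of the old opcodes, kPPC_LoadDecompressTaggedPointer and kPPC_LoadDecompressAnyTagged, emitted exactly this load-then-add sequence, which is why they fold into a single kPPC_LoadDecompressTagged here. In C++ terms the decompression is a zero-extending 32-bit load plus the pointer-compression cage base (stand-in names, minimal sketch):

#include <cstdint>

// lwz zero-extends the 32-bit compressed value; adding the cage base
// reconstructs the full 64-bit tagged pointer.
uint64_t DecompressTagged(uint64_t cage_base, uint32_t compressed) {
  return cage_base + compressed;
}
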
@ -3320,7 +3314,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
AssembleDeconstructFrame();
}
// Constant pool is unavailable since the frame has been destructed
ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
ConstantPoolUnavailableScope constant_pool_unavailable(masm());
if (drop_jsargs) {
// We must pop all arguments from the stack (including the receiver).
// The number of arguments without the receiver is
@ -3334,8 +3328,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
__ mov(argc_reg, Operand(parameter_slots));
__ bind(&skip);
}
__ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
__ DropArguments(argc_reg, MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
} else if (additional_pop_count->IsImmediate()) {
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
__ Drop(parameter_slots + additional_count);
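
On the drop_jsargs path above, argc_reg counts the receiver (MacroAssembler::kCountIncludesReceiver), and the preceding compare-and-skip ensures at least parameter_slots slots are popped. The count logic, as a standalone sketch under that reading of the sequence:

// Slots popped on return when dropping JS arguments: whichever is larger,
// the dynamic argument count (receiver included) or the declared
// parameter slot count.
int SlotsToDrop(int argc_including_receiver, int parameter_slots) {
  return argc_including_receiver > parameter_slots ? argc_including_receiver
                                                   : parameter_slots;
}
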
@ -3391,7 +3385,7 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
frame_access_state()->IncreaseSPDelta(-new_slots);
PPCOperandConverter g(this, nullptr);
if (dest->IsFloatStackSlot() || dest->IsDoubleStackSlot()) {
UseScratchRegisterScope temps(tasm());
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ Pop(scratch);
__ StoreU64(scratch, g.ToMemOperand(dest), r0);


@ -411,8 +411,7 @@ namespace compiler {
V(PPC_S128Store64Lane) \
V(PPC_StoreCompressTagged) \
V(PPC_LoadDecompressTaggedSigned) \
V(PPC_LoadDecompressTaggedPointer) \
V(PPC_LoadDecompressAnyTagged)
V(PPC_LoadDecompressTagged)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
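
The V(...) lines above belong to an X-macro list: the backend declares each opcode once, and the list is expanded with different definitions of V to generate the enum, the name table, and the dispatch switches. That is why the opcode merge at this layer is nothing more than deleting two V(...) entries and adding one. A reduced sketch of the pattern (toy two-entry list, not the real one):

#define DEMO_OPCODE_LIST(V)         \
  V(PPC_LoadDecompressTaggedSigned) \
  V(PPC_LoadDecompressTagged)

// One expansion of the list: generate the opcode enum.
enum class DemoArchOpcode {
#define DECLARE_OPCODE(Name) k##Name,
  DEMO_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};
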


@ -331,8 +331,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_LoadSimd128:
case kPPC_Peek:
case kPPC_LoadDecompressTaggedSigned:
case kPPC_LoadDecompressTaggedPointer:
case kPPC_LoadDecompressAnyTagged:
case kPPC_LoadDecompressTagged:
case kPPC_S128Load8Splat:
case kPPC_S128Load16Splat:
case kPPC_S128Load32Splat:


@ -214,10 +214,10 @@ static void VisitLoadCommon(InstructionSelector* selector, Node* node,
opcode = kPPC_LoadDecompressTaggedSigned;
break;
case MachineRepresentation::kTaggedPointer:
opcode = kPPC_LoadDecompressTaggedPointer;
opcode = kPPC_LoadDecompressTagged;
break;
case MachineRepresentation::kTagged:
opcode = kPPC_LoadDecompressAnyTagged;
opcode = kPPC_LoadDecompressTagged;
break;
#else
case MachineRepresentation::kTaggedSigned: // Fall through.
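
After this change the instruction selector maps both kTaggedPointer and kTagged to the same opcode, leaving kTaggedSigned as the only distinct decompressing load. A standalone sketch of the resulting mapping (stand-in enums, not the V8 types):

enum class Rep { kTaggedSigned, kTaggedPointer, kTagged };
enum class Op { kPPC_LoadDecompressTaggedSigned, kPPC_LoadDecompressTagged };

Op SelectTaggedLoad(Rep rep) {
  if (rep == Rep::kTaggedSigned) return Op::kPPC_LoadDecompressTaggedSigned;
  return Op::kPPC_LoadDecompressTagged;  // kTaggedPointer and kTagged merge
}
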
